From 78bb94be4807808014689483c99fb196a4f7f5d0 Mon Sep 17 00:00:00 2001
From: 李光春
Date: Sat, 13 Aug 2022 11:10:23 +0800
Subject: [PATCH] - update

---
 .gitignore | 3 +-
 .../aliyun/aliyun-oss-go-sdk/LICENSE | 14 +
 .../aliyun/aliyun-oss-go-sdk/oss/auth.go | 345 +
 .../aliyun/aliyun-oss-go-sdk/oss/bucket.go | 1297 +
 .../aliyun/aliyun-oss-go-sdk/oss/client.go | 2366 +
 .../aliyun/aliyun-oss-go-sdk/oss/conf.go | 229 +
 .../aliyun/aliyun-oss-go-sdk/oss/conn.go | 916 +
 .../aliyun/aliyun-oss-go-sdk/oss/const.go | 264 +
 .../aliyun/aliyun-oss-go-sdk/oss/crc.go | 123 +
 .../aliyun/aliyun-oss-go-sdk/oss/download.go | 567 +
 .../aliyun/aliyun-oss-go-sdk/oss/error.go | 94 +
 .../aliyun-oss-go-sdk/oss/limit_reader_1_6.go | 28 +
 .../aliyun-oss-go-sdk/oss/limit_reader_1_7.go | 90 +
 .../aliyun-oss-go-sdk/oss/livechannel.go | 257 +
 .../aliyun/aliyun-oss-go-sdk/oss/mime.go | 572 +
 .../aliyun/aliyun-oss-go-sdk/oss/model.go | 69 +
 .../aliyun/aliyun-oss-go-sdk/oss/multicopy.go | 474 +
 .../aliyun/aliyun-oss-go-sdk/oss/multipart.go | 305 +
 .../aliyun/aliyun-oss-go-sdk/oss/option.go | 689 +
 .../aliyun/aliyun-oss-go-sdk/oss/progress.go | 116 +
 .../aliyun-oss-go-sdk/oss/redirect_1_6.go | 11 +
 .../aliyun-oss-go-sdk/oss/redirect_1_7.go | 12 +
 .../aliyun-oss-go-sdk/oss/select_object.go | 197 +
 .../oss/select_object_type.go | 364 +
 .../aliyun-oss-go-sdk/oss/transport_1_6.go | 41 +
 .../aliyun-oss-go-sdk/oss/transport_1_7.go | 44 +
 .../aliyun/aliyun-oss-go-sdk/oss/type.go | 1306 +
 .../aliyun/aliyun-oss-go-sdk/oss/upload.go | 552 +
 .../aliyun/aliyun-oss-go-sdk/oss/utils.go | 534 +
 vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 +
 .../github.com/aws/aws-sdk-go/aws/arn/arn.go | 93 +
 .../aws/aws-sdk-go/aws/awserr/error.go | 164 +
 .../aws/aws-sdk-go/aws/awserr/types.go | 221 +
 .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 +
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 +
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 +
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 123 +
 .../aws-sdk-go/aws/awsutil/string_value.go | 90 +
 .../aws/aws-sdk-go/aws/client/client.go | 94 +
 .../aws-sdk-go/aws/client/default_retryer.go | 177 +
 .../aws/aws-sdk-go/aws/client/logger.go | 206 +
 .../aws/client/metadata/client_info.go | 15 +
 .../aws-sdk-go/aws/client/no_op_retryer.go | 28 +
 .../github.com/aws/aws-sdk-go/aws/config.go | 631 +
 .../aws/aws-sdk-go/aws/context_1_5.go | 38 +
 .../aws/aws-sdk-go/aws/context_1_9.go | 12 +
 .../aws-sdk-go/aws/context_background_1_5.go | 23 +
 .../aws-sdk-go/aws/context_background_1_7.go | 21 +
 .../aws/aws-sdk-go/aws/context_sleep.go | 24 +
 .../aws/aws-sdk-go/aws/convert_types.go | 918 +
 .../aws-sdk-go/aws/corehandlers/handlers.go | 232 +
 .../aws/corehandlers/param_validator.go | 17 +
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 +
 .../aws/credentials/chain_provider.go | 100 +
 .../credentials/context_background_go1.5.go | 23 +
 .../credentials/context_background_go1.7.go | 21 +
 .../aws/credentials/context_go1.5.go | 40 +
 .../aws/credentials/context_go1.9.go | 14 +
 .../aws-sdk-go/aws/credentials/credentials.go | 383 +
 .../ec2rolecreds/ec2_role_provider.go | 188 +
 .../aws/credentials/endpointcreds/provider.go | 210 +
 .../aws/credentials/env_provider.go | 74 +
 .../aws-sdk-go/aws/credentials/example.ini | 12 +
 .../aws/credentials/processcreds/provider.go | 426 +
 .../shared_credentials_provider.go | 151 +
 .../aws/credentials/ssocreds/doc.go | 60 +
 .../aws-sdk-go/aws/credentials/ssocreds/os.go | 10 +
 .../aws/credentials/ssocreds/os_windows.go | 7 +
 .../aws/credentials/ssocreds/provider.go | 180 +
 .../aws/credentials/static_provider.go | 57 +
 .../stscreds/assume_role_provider.go | 367 +
 .../stscreds/web_identity_provider.go | 182 +
 .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 +
 .../aws/aws-sdk-go/aws/csm/enable.go | 89 +
 .../aws/aws-sdk-go/aws/csm/metric.go | 109 +
 .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 +
 .../aws-sdk-go/aws/csm/metric_exception.go | 26 +
 .../aws/aws-sdk-go/aws/csm/reporter.go | 264 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 +
 .../aws-sdk-go/aws/defaults/shared_config.go | 27 +
 vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 +
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 250 +
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 245 +
 .../aws/ec2metadata/token_provider.go | 93 +
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 193 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 32415 ++++++++++++
 .../aws/endpoints/dep_service_ids.go | 141 +
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 +
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 706 +
 .../aws/endpoints/legacy_regions.go | 24 +
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 594 +
 .../aws/endpoints/v3model_codegen.go | 412 +
 .../github.com/aws/aws-sdk-go/aws/errors.go | 13 +
 .../aws/aws-sdk-go/aws/jsonvalue.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/logger.go | 121 +
 .../aws/request/connection_reset_error.go | 19 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 346 +
 .../aws-sdk-go/aws/request/http_request.go | 24 +
 .../aws-sdk-go/aws/request/offset_reader.go | 65 +
 .../aws/aws-sdk-go/aws/request/request.go | 722 +
 .../aws/aws-sdk-go/aws/request/request_1_7.go | 40 +
 .../aws/aws-sdk-go/aws/request/request_1_8.go | 37 +
 .../aws-sdk-go/aws/request/request_context.go | 15 +
 .../aws/request/request_context_1_6.go | 15 +
 .../aws/request/request_pagination.go | 266 +
 .../aws/aws-sdk-go/aws/request/retryer.go | 309 +
 .../aws/request/timeout_read_closer.go | 94 +
 .../aws/aws-sdk-go/aws/request/validation.go | 286 +
 .../aws/aws-sdk-go/aws/request/waiter.go | 295 +
 .../aws/aws-sdk-go/aws/session/credentials.go | 303 +
 .../aws/session/custom_transport.go | 28 +
 .../aws/session/custom_transport_go1.12.go | 27 +
 .../aws/session/custom_transport_go1.5.go | 23 +
 .../aws/session/custom_transport_go1.6.go | 24 +
 .../aws/aws-sdk-go/aws/session/doc.go | 367 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 471 +
 .../aws/aws-sdk-go/aws/session/session.go | 997 +
 .../aws-sdk-go/aws/session/shared_config.go | 729 +
 .../aws-sdk-go/aws/signer/v4/header_rules.go | 81 +
 .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 +
 .../aws/signer/v4/request_context_go1.5.go | 14 +
 .../aws/signer/v4/request_context_go1.7.go | 14 +
 .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 +
 .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 25 +
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 854 +
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 264 +
 vendor/github.com/aws/aws-sdk-go/aws/url.go | 13 +
 .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 30 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 8 +
 .../internal/context/background_go1.5.go | 41 +
 .../aws/aws-sdk-go/internal/ini/ast.go | 120 +
 .../aws-sdk-go/internal/ini/comma_token.go | 11 +
 .../aws-sdk-go/internal/ini/comment_token.go | 35 +
 .../aws/aws-sdk-go/internal/ini/doc.go | 42 +
 .../aws-sdk-go/internal/ini/empty_token.go | 4 +
 .../aws/aws-sdk-go/internal/ini/expression.go | 24 +
 .../aws/aws-sdk-go/internal/ini/fuzz.go | 18 +
 .../aws/aws-sdk-go/internal/ini/ini.go | 51 +
 .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 +
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 350 +
 .../aws-sdk-go/internal/ini/literal_tokens.go | 340 +
 .../aws-sdk-go/internal/ini/newline_token.go | 30 +
 .../aws-sdk-go/internal/ini/number_helper.go | 152 +
 .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 +
 .../aws-sdk-go/internal/ini/parse_error.go | 43 +
 .../aws-sdk-go/internal/ini/parse_stack.go | 60 +
 .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 +
 .../aws/aws-sdk-go/internal/ini/skipper.go | 45 +
 .../aws/aws-sdk-go/internal/ini/statement.go | 35 +
 .../aws/aws-sdk-go/internal/ini/value_util.go | 284 +
 .../aws/aws-sdk-go/internal/ini/visitor.go | 169 +
 .../aws/aws-sdk-go/internal/ini/walker.go | 25 +
 .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 +
 .../internal/s3shared/arn/accesspoint_arn.go | 50 +
 .../aws-sdk-go/internal/s3shared/arn/arn.go | 94 +
 .../internal/s3shared/arn/outpost_arn.go | 126 +
 .../s3shared/arn/s3_object_lambda_arn.go | 15 +
 .../internal/s3shared/endpoint_errors.go | 202 +
 .../internal/s3shared/resource_request.go | 45 +
 .../internal/s3shared/s3err/error.go | 57 +
 .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 11 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 13 +
 .../aws/aws-sdk-go/internal/sdkmath/floor.go | 16 +
 .../internal/sdkmath/floor_go1.9.go | 57 +
 .../internal/sdkrand/locked_source.go | 29 +
 .../aws/aws-sdk-go/internal/sdkrand/read.go | 12 +
 .../aws-sdk-go/internal/sdkrand/read_1_5.go | 25 +
 .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 +
 .../internal/shareddefaults/ecs_container.go | 12 +
 .../internal/shareddefaults/shared_config.go | 40 +
 .../aws-sdk-go/internal/strings/strings.go | 11 +
 .../internal/sync/singleflight/LICENSE | 27 +
 .../sync/singleflight/singleflight.go | 120 +
 .../private/checksum/content_md5.go | 53 +
 .../private/protocol/eventstream/debug.go | 144 +
 .../private/protocol/eventstream/decode.go | 216 +
 .../private/protocol/eventstream/encode.go | 162 +
 .../private/protocol/eventstream/error.go | 23 +
 .../eventstream/eventstreamapi/error.go | 81 +
 .../eventstream/eventstreamapi/reader.go | 173 +
 .../eventstream/eventstreamapi/shared.go | 23 +
 .../eventstream/eventstreamapi/signer.go | 123 +
 .../eventstreamapi/stream_writer.go | 129 +
 .../eventstream/eventstreamapi/transport.go | 10 +
 .../eventstreamapi/transport_go1.17.go | 19 +
 .../eventstream/eventstreamapi/writer.go | 63 +
 .../private/protocol/eventstream/header.go | 175 +
 .../protocol/eventstream/header_value.go | 506 +
 .../private/protocol/eventstream/message.go | 117 +
 .../aws/aws-sdk-go/private/protocol/host.go | 104 +
 .../private/protocol/host_prefix.go | 54 +
 .../private/protocol/idempotency.go | 75 +
 .../private/protocol/json/jsonutil/build.go | 298 +
 .../protocol/json/jsonutil/unmarshal.go | 304 +
 .../private/protocol/jsonrpc/jsonrpc.go | 87 +
 .../protocol/jsonrpc/unmarshal_error.go | 107 +
 .../aws-sdk-go/private/protocol/jsonvalue.go | 76 +
 .../aws-sdk-go/private/protocol/payload.go | 81 +
 .../aws-sdk-go/private/protocol/protocol.go | 49 +
 .../private/protocol/query/build.go | 36 +
 .../protocol/query/queryutil/queryutil.go | 246 +
 .../private/protocol/query/unmarshal.go | 39 +
 .../private/protocol/query/unmarshal_error.go | 69 +
 .../aws-sdk-go/private/protocol/rest/build.go | 333 +
 .../private/protocol/rest/payload.go | 54 +
 .../private/protocol/rest/unmarshal.go | 264 +
 .../private/protocol/restjson/restjson.go | 59 +
 .../protocol/restjson/unmarshal_error.go | 134 +
 .../private/protocol/restxml/restxml.go | 79 +
 .../aws-sdk-go/private/protocol/timestamp.go | 134 +
 .../aws-sdk-go/private/protocol/unmarshal.go | 27 +
 .../private/protocol/unmarshal_error.go | 65 +
 .../private/protocol/xml/xmlutil/build.go | 317 +
 .../private/protocol/xml/xmlutil/sort.go | 32 +
 .../private/protocol/xml/xmlutil/unmarshal.go | 299 +
 .../protocol/xml/xmlutil/xml_to_struct.go | 173 +
 .../aws/aws-sdk-go/service/s3/api.go | 42213 ++++++++++++++++
 .../aws/aws-sdk-go/service/s3/body_hash.go | 202 +
 .../aws-sdk-go/service/s3/bucket_location.go | 107 +
 .../aws-sdk-go/service/s3/customizations.go | 89 +
 .../aws/aws-sdk-go/service/s3/doc.go | 26 +
 .../aws/aws-sdk-go/service/s3/doc_custom.go | 110 +
 .../aws/aws-sdk-go/service/s3/endpoint.go | 299 +
 .../aws-sdk-go/service/s3/endpoint_builder.go | 242 +
 .../aws/aws-sdk-go/service/s3/errors.go | 60 +
 .../service/s3/host_style_bucket.go | 136 +
 .../service/s3/platform_handlers.go | 9 +
 .../service/s3/platform_handlers_go1.6.go | 29 +
 .../aws/aws-sdk-go/service/s3/service.go | 107 +
 .../aws/aws-sdk-go/service/s3/sse.go | 84 +
 .../aws-sdk-go/service/s3/statusok_error.go | 47 +
 .../aws-sdk-go/service/s3/unmarshal_error.go | 114 +
 .../aws/aws-sdk-go/service/s3/waiters.go | 214 +
 .../aws/aws-sdk-go/service/sso/api.go | 1354 +
 .../aws/aws-sdk-go/service/sso/doc.go | 44 +
 .../aws/aws-sdk-go/service/sso/errors.go | 44 +
 .../aws/aws-sdk-go/service/sso/service.go | 105 +
 .../service/sso/ssoiface/interface.go | 86 +
 .../aws/aws-sdk-go/service/sts/api.go | 3445 ++
 .../aws-sdk-go/service/sts/customizations.go | 11 +
 .../aws/aws-sdk-go/service/sts/doc.go | 32 +
 .../aws/aws-sdk-go/service/sts/errors.go | 84 +
 .../aws/aws-sdk-go/service/sts/service.go | 103 +
 .../service/sts/stsiface/interface.go | 96 +
 vendor/github.com/baidubce/bce-sdk-go/LICENSE | 177 +
 .../baidubce/bce-sdk-go/auth/credentials.go | 62 +
 .../baidubce/bce-sdk-go/auth/signer.go | 184 +
 .../baidubce/bce-sdk-go/bce/builder.go | 189 +
 .../baidubce/bce-sdk-go/bce/client.go | 267 +
 .../baidubce/bce-sdk-go/bce/config.go | 80 +
 .../baidubce/bce-sdk-go/bce/error.go | 64 +
 .../baidubce/bce-sdk-go/bce/request.go | 219 +
 .../baidubce/bce-sdk-go/bce/response.go | 128 +
 .../baidubce/bce-sdk-go/bce/retry.go | 118 +
 .../baidubce/bce-sdk-go/http/client.go | 174 +
 .../baidubce/bce-sdk-go/http/constants.go | 83 +
 .../baidubce/bce-sdk-go/http/request.go | 221 +
 .../baidubce/bce-sdk-go/http/response.go | 74 +
 .../bce-sdk-go/services/bos/api/bucket.go | 1090 +
 .../bce-sdk-go/services/bos/api/model.go | 580 +
 .../bce-sdk-go/services/bos/api/multipart.go | 421 +
 .../bce-sdk-go/services/bos/api/object.go | 949 +
 .../bce-sdk-go/services/bos/api/util.go | 274 +
 .../bce-sdk-go/services/bos/client.go | 2188 +
 .../baidubce/bce-sdk-go/util/log/logger.go | 393 +
 .../baidubce/bce-sdk-go/util/log/util.go | 227 +
 .../baidubce/bce-sdk-go/util/string.go | 88 +
 .../baidubce/bce-sdk-go/util/time.go | 46 +
 vendor/github.com/clbanning/mxj/LICENSE | 55 +
 vendor/github.com/clbanning/mxj/anyxml.go | 189 +
 .../clbanning/mxj/atomFeedString.xml | 54 +
 vendor/github.com/clbanning/mxj/doc.go | 134 +
 .../github.com/clbanning/mxj/escapechars.go | 54 +
 vendor/github.com/clbanning/mxj/exists.go | 7 +
 vendor/github.com/clbanning/mxj/files.go | 287 +
 .../clbanning/mxj/files_test.badjson | 2 +
 .../clbanning/mxj/files_test.badxml | 9 +
 .../github.com/clbanning/mxj/files_test.json | 2 +
 .../github.com/clbanning/mxj/files_test.xml | 9 +
 .../clbanning/mxj/files_test_dup.json | 1 +
 .../clbanning/mxj/files_test_dup.xml | 1 +
 .../clbanning/mxj/files_test_indent.json | 12 +
 .../clbanning/mxj/files_test_indent.xml | 8 +
 vendor/github.com/clbanning/mxj/gob.go | 35 +
 vendor/github.com/clbanning/mxj/json.go | 323 +
 vendor/github.com/clbanning/mxj/keyvalues.go | 671 +
 vendor/github.com/clbanning/mxj/leafnode.go | 112 +
 vendor/github.com/clbanning/mxj/misc.go | 86 +
 vendor/github.com/clbanning/mxj/mxj.go | 128 +
 vendor/github.com/clbanning/mxj/newmap.go | 184 +
 vendor/github.com/clbanning/mxj/readme.md | 179 +
 vendor/github.com/clbanning/mxj/remove.go | 37 +
 vendor/github.com/clbanning/mxj/rename.go | 54 +
 vendor/github.com/clbanning/mxj/set.go | 26 +
 .../github.com/clbanning/mxj/setfieldsep.go | 20 +
 vendor/github.com/clbanning/mxj/songtext.xml | 29 +
 vendor/github.com/clbanning/mxj/strict.go | 30 +
 vendor/github.com/clbanning/mxj/struct.go | 54 +
 .../github.com/clbanning/mxj/updatevalues.go | 256 +
 vendor/github.com/clbanning/mxj/xml.go | 1139 +
 vendor/github.com/clbanning/mxj/xmlseq.go | 828 +
 .../github.com/deckarep/golang-set/.gitignore | 22 +
 vendor/github.com/deckarep/golang-set/LICENSE | 22 +
 .../github.com/deckarep/golang-set/README.md | 95 +
 .../deckarep/golang-set/iterator.go | 58 +
 vendor/github.com/deckarep/golang-set/set.go | 217 +
 .../deckarep/golang-set/threadsafe.go | 283 +
 .../deckarep/golang-set/threadunsafe.go | 340 +
 .../github.com/google/go-querystring/LICENSE | 27 +
 .../google/go-querystring/query/encode.go | 357 +
 .../huaweicloud-sdk-go-obs/LICENSE | 201 +
 .../huaweicloud-sdk-go-obs/obs/auth.go | 320 +
 .../huaweicloud-sdk-go-obs/obs/authV2.go | 55 +
 .../huaweicloud-sdk-go-obs/obs/authV4.go | 137 +
 .../huaweicloud-sdk-go-obs/obs/client_base.go | 61 +
 .../obs/client_bucket.go | 731 +
 .../obs/client_object.go | 408 +
 .../obs/client_other.go | 49 +
 .../huaweicloud-sdk-go-obs/obs/client_part.go | 250 +
 .../obs/client_resume.go | 59 +
 .../huaweicloud-sdk-go-obs/obs/conf.go | 497 +
 .../huaweicloud-sdk-go-obs/obs/const.go | 969 +
 .../huaweicloud-sdk-go-obs/obs/convert.go | 1114 +
 .../huaweicloud-sdk-go-obs/obs/error.go | 34 +
 .../huaweicloud-sdk-go-obs/obs/extension.go | 41 +
 .../huaweicloud-sdk-go-obs/obs/http.go | 637 +
 .../huaweicloud-sdk-go-obs/obs/log.go | 330 +
 .../huaweicloud-sdk-go-obs/obs/model_base.go | 363 +
 .../obs/model_bucket.go | 356 +
 .../obs/model_header.go | 32 +
 .../obs/model_object.go | 345 +
 .../huaweicloud-sdk-go-obs/obs/model_other.go | 63 +
 .../huaweicloud-sdk-go-obs/obs/model_part.go | 170 +
 .../obs/model_response.go | 67 +
 .../huaweicloud-sdk-go-obs/obs/pool.go | 542 +
 .../huaweicloud-sdk-go-obs/obs/provider.go | 243 +
 .../obs/temporary_createSignedUrl.go | 53 +
 .../obs/temporary_other.go | 116 +
 .../obs/temporary_signedUrl.go | 758 +
 .../huaweicloud-sdk-go-obs/obs/trait.go | 968 +
 .../huaweicloud-sdk-go-obs/obs/transfer.go | 874 +
 .../huaweicloud-sdk-go-obs/obs/util.go | 551 +
 .../jmespath/go-jmespath/.gitignore | 4 +
 .../jmespath/go-jmespath/.travis.yml | 28 +
 .../github.com/jmespath/go-jmespath/LICENSE | 13 +
 .../github.com/jmespath/go-jmespath/Makefile | 51 +
 .../github.com/jmespath/go-jmespath/README.md | 87 +
 vendor/github.com/jmespath/go-jmespath/api.go | 49 +
 .../go-jmespath/astnodetype_string.go | 16 +
 .../jmespath/go-jmespath/functions.go | 842 +
 .../jmespath/go-jmespath/interpreter.go | 418 +
 .../github.com/jmespath/go-jmespath/lexer.go | 420 +
 .../github.com/jmespath/go-jmespath/parser.go | 603 +
 .../jmespath/go-jmespath/toktype_string.go | 16 +
 .../github.com/jmespath/go-jmespath/util.go | 185 +
 .../ks3sdklib/aws-sdk-go/LICENSE.txt | 202 +
 .../ks3sdklib/aws-sdk-go/NOTICE.txt | 3 +
 .../ks3sdklib/aws-sdk-go/aws/awserr/error.go | 88 +
 .../ks3sdklib/aws-sdk-go/aws/awsutil/copy.go | 96 +
 .../aws-sdk-go/aws/awsutil/path_value.go | 175 +
 .../aws-sdk-go/aws/awsutil/string_value.go | 103 +
 .../ks3sdklib/aws-sdk-go/aws/awsutil/util.go | 133 +
 .../ks3sdklib/aws-sdk-go/aws/config.go | 173 +
 .../aws/credentials/chain_provider.go | 81 +
 .../aws-sdk-go/aws/credentials/credentials.go | 178 +
 .../aws/credentials/ec2_role_provider.go | 173 +
 .../aws/credentials/env_provider.go | 67 +
 .../aws-sdk-go/aws/credentials/example.ini | 8 +
 .../aws-sdk-go/aws/credentials/ini.go | 123 +
 .../shared_credentials_provider.go | 131 +
 .../aws/credentials/static_provider.go | 42 +
 .../aws-sdk-go/aws/handler_functions.go | 154 +
 .../ks3sdklib/aws-sdk-go/aws/handlers.go | 85 +
 .../aws-sdk-go/aws/param_validator.go | 89 +
 .../ks3sdklib/aws-sdk-go/aws/request.go | 310 +
 .../ks3sdklib/aws-sdk-go/aws/service.go | 176 +
 .../ks3sdklib/aws-sdk-go/aws/types.go | 131 +
 .../ks3sdklib/aws-sdk-go/aws/version.go | 8 +
 .../aws-sdk-go/internal/apierr/error.go | 139 +
 .../internal/endpoints/endpoints.go | 31 +
 .../internal/endpoints/endpoints.json | 77 +
 .../internal/endpoints/endpoints_map.go | 89 +
 .../internal/protocol/query/build.go | 33 +
 .../protocol/query/queryutil/queryutil.go | 223 +
 .../internal/protocol/query/unmarshal.go | 29 +
 .../protocol/query/unmarshal_error.go | 44 +
 .../internal/protocol/rest/build.go | 225 +
 .../internal/protocol/rest/payload.go | 45 +
 .../internal/protocol/rest/unmarshal.go | 174 +
 .../internal/protocol/restxml/restxml.go | 54 +
 .../internal/protocol/xml/xmlutil/build.go | 287 +
 .../protocol/xml/xmlutil/unmarshal.go | 263 +
 .../protocol/xml/xmlutil/xml_to_struct.go | 105 +
 .../aws-sdk-go/internal/signer/v2/v2.go | 348 +
 .../ks3sdklib/aws-sdk-go/service/s3/api.go | 6486 +++
 .../ks3sdklib/aws-sdk-go/service/s3/const.go | 19 +
 .../ks3sdklib/aws-sdk-go/service/s3/errors.go | 60 +
 .../service/s3/host_style_bucket.go | 53 +
 .../aws-sdk-go/service/s3/service.go | 57 +
 .../ks3sdklib/aws-sdk-go/service/s3/try.go | 35 +
 .../mitchellh/mapstructure/CHANGELOG.md | 96 +
 .../github.com/mitchellh/mapstructure/LICENSE | 21 +
 .../mitchellh/mapstructure/README.md | 46 +
 .../mitchellh/mapstructure/decode_hooks.go | 279 +
 .../mitchellh/mapstructure/error.go | 50 +
 .../mitchellh/mapstructure/mapstructure.go | 1540 +
 .../mozillazg/go-httpheader/.bumpversion.cfg | 7 +
 .../mozillazg/go-httpheader/.gitignore | 27 +
 .../mozillazg/go-httpheader/CHANGELOG.md | 28 +
 .../mozillazg/go-httpheader/LICENSE | 21 +
 .../mozillazg/go-httpheader/Makefile | 15 +
 .../mozillazg/go-httpheader/README.md | 67 +
 .../mozillazg/go-httpheader/decode.go | 267 +
 .../mozillazg/go-httpheader/encode.go | 297 +
 vendor/github.com/qiniu/go-sdk/v7/.gitignore | 35 +
 .../github.com/qiniu/go-sdk/v7/CHANGELOG.md | 56 +
 vendor/github.com/qiniu/go-sdk/v7/LICENSE | 21 +
 vendor/github.com/qiniu/go-sdk/v7/Makefile | 5 +
 vendor/github.com/qiniu/go-sdk/v7/OWNERS | 14 +
 vendor/github.com/qiniu/go-sdk/v7/README.md | 44 +
 .../qiniu/go-sdk/v7/auth/context.go | 36 +
 .../qiniu/go-sdk/v7/auth/credentials.go | 222 +
 .../qiniu/go-sdk/v7/auth/qbox/doc.go | 2 +
 .../qiniu/go-sdk/v7/auth/qbox/qbox_auth.go | 28 +
 .../github.com/qiniu/go-sdk/v7/auth/signer.go | 10 +
 .../qiniu/go-sdk/v7/client/client.go | 381 +
 .../qiniu/go-sdk/v7/client/header.go | 25 +
 .../qiniu/go-sdk/v7/client/json_decode.go | 35 +
 .../github.com/qiniu/go-sdk/v7/conf/conf.go | 23 +
 vendor/github.com/qiniu/go-sdk/v7/conf/doc.go | 2 +
 vendor/github.com/qiniu/go-sdk/v7/doc.go | 12 +
 vendor/github.com/qiniu/go-sdk/v7/err.go | 22 +
 .../qiniu/go-sdk/v7/internal/log/logger.go | 76 +
 .../github.com/qiniu/go-sdk/v7/reqid/reqid.go | 20 +
 .../go-sdk/v7/storage/backward_compatible.go | 33 +
 .../qiniu/go-sdk/v7/storage/base64_upload.go | 152 +
 .../qiniu/go-sdk/v7/storage/bucket.go | 999 +
 .../qiniu/go-sdk/v7/storage/config.go | 85 +
 .../github.com/qiniu/go-sdk/v7/storage/doc.go | 10 +
 .../qiniu/go-sdk/v7/storage/errs.go | 13 +
 .../qiniu/go-sdk/v7/storage/form_upload.go | 364 +
 .../qiniu/go-sdk/v7/storage/pfop.go | 212 +
 .../qiniu/go-sdk/v7/storage/recorder.go | 103 +
 .../qiniu/go-sdk/v7/storage/region.go | 579 +
 .../go-sdk/v7/storage/resume_uploader.go | 437 +
 .../go-sdk/v7/storage/resume_uploader_apis.go | 262 +
 .../go-sdk/v7/storage/resume_uploader_base.go | 287 +
 .../v7/storage/resume_uploader_deprecated.go | 91 +
 .../go-sdk/v7/storage/resume_uploader_v2.go | 397 +
 .../qiniu/go-sdk/v7/storage/token.go | 156 +
 .../github.com/qiniu/go-sdk/v7/storage/uc.go | 595 +
 .../qiniu/go-sdk/v7/storage/uploader_base.go | 23 +
 .../qiniu/go-sdk/v7/storage/util.go | 22 +
 .../qiniu/go-sdk/v7/storage/zone.go | 50 +
 vendor/github.com/qiniu/go-sdk/v7/types.go | 40 +
 .../tencentyun/cos-go-sdk-v5/.bumpversion.cfg | 7 +
 .../tencentyun/cos-go-sdk-v5/.gitignore | 32 +
 .../tencentyun/cos-go-sdk-v5/.travis.yml | 32 +
 .../tencentyun/cos-go-sdk-v5/CHANGELOG.md | 628 +
 .../tencentyun/cos-go-sdk-v5/LICENSE | 21 +
 .../tencentyun/cos-go-sdk-v5/Makefile | 22 +
 .../tencentyun/cos-go-sdk-v5/README.md | 106 +
 .../tencentyun/cos-go-sdk-v5/auth.go | 550 +
 .../tencentyun/cos-go-sdk-v5/batch.go | 262 +
 .../tencentyun/cos-go-sdk-v5/bucket.go | 199 +
 .../cos-go-sdk-v5/bucket_accelerate.go | 37 +
 .../tencentyun/cos-go-sdk-v5/bucket_acl.go | 65 +
 .../tencentyun/cos-go-sdk-v5/bucket_cors.go | 71 +
 .../tencentyun/cos-go-sdk-v5/bucket_domain.go | 53 +
 .../cos-go-sdk-v5/bucket_encryption.go | 51 +
 .../bucket_intelligenttiering.go | 47 +
 .../cos-go-sdk-v5/bucket_inventory.go | 132 +
 .../cos-go-sdk-v5/bucket_lifecycle.go | 127 +
 .../cos-go-sdk-v5/bucket_location.go | 28 +
 .../cos-go-sdk-v5/bucket_logging.go | 50 +
 .../tencentyun/cos-go-sdk-v5/bucket_origin.go | 91 +
 .../tencentyun/cos-go-sdk-v5/bucket_part.go | 57 +
 .../tencentyun/cos-go-sdk-v5/bucket_policy.go | 71 +
 .../cos-go-sdk-v5/bucket_referer.go | 40 +
 .../cos-go-sdk-v5/bucket_replication.go | 69 +
 .../cos-go-sdk-v5/bucket_tagging.go | 69 +
 .../cos-go-sdk-v5/bucket_version.go | 42 +
 .../cos-go-sdk-v5/bucket_website.go | 71 +
 .../github.com/tencentyun/cos-go-sdk-v5/ci.go | 1834 +
 .../tencentyun/cos-go-sdk-v5/ci_doc.go | 283 +
 .../tencentyun/cos-go-sdk-v5/ci_media.go | 2140 +
 .../tencentyun/cos-go-sdk-v5/cos.go | 563 +
 .../tencentyun/cos-go-sdk-v5/doc.go | 3 +
 .../tencentyun/cos-go-sdk-v5/error.go | 96 +
 .../tencentyun/cos-go-sdk-v5/helper.go | 390 +
 .../tencentyun/cos-go-sdk-v5/object.go | 1617 +
 .../tencentyun/cos-go-sdk-v5/object_acl.go | 66 +
 .../tencentyun/cos-go-sdk-v5/object_part.go | 521 +
 .../tencentyun/cos-go-sdk-v5/object_select.go | 445 +
 .../tencentyun/cos-go-sdk-v5/progress.go | 160 +
 .../tencentyun/cos-go-sdk-v5/service.go | 35 +
 vendor/github.com/upyun/go-sdk/v3/LICENSE | 21 +
 .../github.com/upyun/go-sdk/v3/upyun/auth.go | 84 +
 .../upyun/go-sdk/v3/upyun/errors.go | 78 +
 .../upyun/go-sdk/v3/upyun/fileinfo.go | 72 +
 .../github.com/upyun/go-sdk/v3/upyun/form.go | 148 +
 .../github.com/upyun/go-sdk/v3/upyun/http.go | 82 +
 vendor/github.com/upyun/go-sdk/v3/upyun/io.go | 78 +
 .../upyun/go-sdk/v3/upyun/process.go | 230 +
 .../github.com/upyun/go-sdk/v3/upyun/purge.go | 53 +
 .../github.com/upyun/go-sdk/v3/upyun/rest.go | 741 +
 .../github.com/upyun/go-sdk/v3/upyun/upyun.go | 61 +
 .../github.com/upyun/go-sdk/v3/upyun/utils.go | 234 +
 vendor/golang.org/x/sync/LICENSE | 27 +
 vendor/golang.org/x/sync/PATENTS | 22 +
 .../x/sync/singleflight/singleflight.go | 212 +
 vendor/golang.org/x/time/LICENSE | 27 +
 vendor/golang.org/x/time/PATENTS | 22 +
 vendor/golang.org/x/time/rate/rate.go | 419 +
 vendor/modules.txt | 125 +
 517 files changed, 182540 insertions(+), 2 deletions(-)
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/LICENSE
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/auth/signer.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/builder.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/client.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/config.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/error.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/request.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/response.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/bce/retry.go
 create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/client.go
create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/constants.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/request.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/http/response.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/log/util.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/string.go create mode 100644 vendor/github.com/baidubce/bce-sdk-go/util/time.go create mode 100644 vendor/github.com/clbanning/mxj/LICENSE create mode 100644 vendor/github.com/clbanning/mxj/anyxml.go create mode 100644 vendor/github.com/clbanning/mxj/atomFeedString.xml create mode 100644 vendor/github.com/clbanning/mxj/doc.go create mode 100644 vendor/github.com/clbanning/mxj/escapechars.go create mode 100644 vendor/github.com/clbanning/mxj/exists.go create mode 100644 vendor/github.com/clbanning/mxj/files.go create mode 100644 vendor/github.com/clbanning/mxj/files_test.badjson create mode 100644 vendor/github.com/clbanning/mxj/files_test.badxml create mode 100644 vendor/github.com/clbanning/mxj/files_test.json create mode 100644 vendor/github.com/clbanning/mxj/files_test.xml create mode 100644 vendor/github.com/clbanning/mxj/files_test_dup.json create mode 100644 vendor/github.com/clbanning/mxj/files_test_dup.xml create mode 100644 vendor/github.com/clbanning/mxj/files_test_indent.json create mode 100644 vendor/github.com/clbanning/mxj/files_test_indent.xml create mode 100644 vendor/github.com/clbanning/mxj/gob.go create mode 100644 vendor/github.com/clbanning/mxj/json.go create mode 100644 vendor/github.com/clbanning/mxj/keyvalues.go create mode 100644 vendor/github.com/clbanning/mxj/leafnode.go create mode 100644 vendor/github.com/clbanning/mxj/misc.go create mode 100644 vendor/github.com/clbanning/mxj/mxj.go create mode 100644 vendor/github.com/clbanning/mxj/newmap.go create mode 100644 vendor/github.com/clbanning/mxj/readme.md create mode 100644 vendor/github.com/clbanning/mxj/remove.go create mode 100644 vendor/github.com/clbanning/mxj/rename.go create mode 100644 vendor/github.com/clbanning/mxj/set.go create mode 100644 vendor/github.com/clbanning/mxj/setfieldsep.go create mode 100644 vendor/github.com/clbanning/mxj/songtext.xml create mode 100644 vendor/github.com/clbanning/mxj/strict.go create mode 100644 vendor/github.com/clbanning/mxj/struct.go create mode 100644 vendor/github.com/clbanning/mxj/updatevalues.go create mode 100644 vendor/github.com/clbanning/mxj/xml.go create mode 100644 vendor/github.com/clbanning/mxj/xmlseq.go create mode 100644 vendor/github.com/deckarep/golang-set/.gitignore create mode 100644 vendor/github.com/deckarep/golang-set/LICENSE create mode 100644 vendor/github.com/deckarep/golang-set/README.md create mode 100644 vendor/github.com/deckarep/golang-set/iterator.go create mode 100644 vendor/github.com/deckarep/golang-set/set.go create mode 100644 vendor/github.com/deckarep/golang-set/threadsafe.go create mode 100644 
vendor/github.com/deckarep/golang-set/threadunsafe.go create mode 100644 vendor/github.com/google/go-querystring/LICENSE create mode 100644 vendor/github.com/google/go-querystring/query/encode.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go create mode 100644 vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 
vendor/github.com/jmespath/go-jmespath/functions.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/LICENSE.txt
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/NOTICE.txt
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/awserr/error.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/copy.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/path_value.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/string_value.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/util.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/config.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/chain_provider.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/credentials.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ec2_role_provider.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/env_provider.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/example.ini
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ini.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/shared_credentials_provider.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/static_provider.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/handler_functions.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/handlers.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/param_validator.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/request.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/service.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/types.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/aws/version.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/apierr/error.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.json
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints_map.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/build.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal_error.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/build.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/payload.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/unmarshal.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml/restxml.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/internal/signer/v2/v2.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/api.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/const.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/errors.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/host_style_bucket.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/service.go
 create mode 100644 vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/try.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
 create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE
 create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
 create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
 create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/.bumpversion.cfg
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/.gitignore
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/CHANGELOG.md
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/LICENSE
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/Makefile
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/README.md
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/decode.go
 create mode 100644 vendor/github.com/mozillazg/go-httpheader/encode.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/.gitignore
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/LICENSE
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/Makefile
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/OWNERS
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/README.md
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/auth/context.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/auth/credentials.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/auth/qbox/doc.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/auth/qbox/qbox_auth.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/auth/signer.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/client/client.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/client/header.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/client/json_decode.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/conf/conf.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/conf/doc.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/doc.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/err.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/internal/log/logger.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/reqid/reqid.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/backward_compatible.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/base64_upload.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/bucket.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/config.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/doc.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/errs.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/form_upload.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/pfop.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/recorder.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/region.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_apis.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_base.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_deprecated.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_v2.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/token.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/uc.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/uploader_base.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/util.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/storage/zone.go
 create mode 100644 vendor/github.com/qiniu/go-sdk/v7/types.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/.bumpversion.cfg
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/.gitignore
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/.travis.yml
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/CHANGELOG.md
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/LICENSE
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/Makefile
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/README.md
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/auth.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/batch.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_cors.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_domain.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_encryption.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_intelligenttiering.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_inventory.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_lifecycle.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_location.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_logging.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_origin.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_part.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_policy.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_referer.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_replication.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_tagging.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_version.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_website.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/ci.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/ci_doc.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/ci_media.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/cos.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/doc.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/error.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/helper.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/object.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/object_acl.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/object_part.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/object_select.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/progress.go
 create mode 100644 vendor/github.com/tencentyun/cos-go-sdk-v5/service.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/LICENSE
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/auth.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/errors.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/fileinfo.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/form.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/http.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/io.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/process.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/purge.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/rest.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/upyun.go
 create mode 100644 vendor/github.com/upyun/go-sdk/v3/upyun/utils.go
 create mode 100644 vendor/golang.org/x/sync/LICENSE
 create mode 100644 vendor/golang.org/x/sync/PATENTS
 create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go
 create mode 100644 vendor/golang.org/x/time/LICENSE
 create mode 100644 vendor/golang.org/x/time/PATENTS
 create mode 100644 vendor/golang.org/x/time/rate/rate.go
 create mode 100644 vendor/modules.txt

diff --git a/.gitignore b/.gitignore
index 0dae164..cc7eec5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,5 +5,4 @@
 .vscode
 *.log
 gomod.sh
-aliyun_test.go
-/vendor/
+aliyun_test.go
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
new file mode 100644
index 0000000..d46e9d1
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2015 aliyun.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
new file mode 100644
index 0000000..9cb9c3c
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
@@ -0,0 +1,345 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// headerSorter defines the key-value structure for storing the sorted data in signHeader.
+type headerSorter struct {
+	Keys []string
+	Vals []string
+}
+
+// getAdditionalHeaderKeys returns the configured additional header keys that exist in the HTTP request header.
+func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
+	var keysList []string
+	keysMap := make(map[string]string)
+	srcKeys := make(map[string]string)
+
+	for k := range req.Header {
+		srcKeys[strings.ToLower(k)] = ""
+	}
+
+	for _, v := range conn.config.AdditionalHeaders {
+		if _, ok := srcKeys[strings.ToLower(v)]; ok {
+			keysMap[strings.ToLower(v)] = ""
+		}
+	}
+
+	for k := range keysMap {
+		keysList = append(keysList, k)
+	}
+	sort.Strings(keysList)
+	return keysList, keysMap
+}
+
+// getAdditionalHeaderKeysV4 returns the configured additional header keys that exist in the HTTP request header,
+// excluding Content-MD5 and Content-Type.
+func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
+	var keysList []string
+	keysMap := make(map[string]string)
+	srcKeys := make(map[string]string)
+
+	for k := range req.Header {
+		srcKeys[strings.ToLower(k)] = ""
+	}
+
+	for _, v := range conn.config.AdditionalHeaders {
+		if _, ok := srcKeys[strings.ToLower(v)]; ok {
+			if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
+				keysMap[strings.ToLower(v)] = ""
+			}
+		}
+	}
+
+	for k := range keysMap {
+		keysList = append(keysList, k)
+	}
+	sort.Strings(keysList)
+	return keysList, keysMap
+}
+
+// signHeader signs the header and sets it as the authorization header.
+func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
+	akIf := conn.config.GetCredentials()
+	authorizationStr := ""
+	if conn.config.AuthVersion == AuthV4 {
+		strDay := ""
+		strDate := req.Header.Get(HttpHeaderOssDate)
+		if strDate == "" {
+			strDate = req.Header.Get(HTTPHeaderDate)
+			t, _ := time.Parse(http.TimeFormat, strDate)
+			strDay = t.Format("20060102")
+		} else {
+			t, _ := time.Parse(iso8601DateFormatSecond, strDate)
+			strDay = t.Format("20060102")
+		}
+
+		signHeaderProduct := conn.config.GetSignProduct()
+		signHeaderRegion := conn.config.GetSignRegion()
+
+		additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
+		if len(additionalList) > 0 {
+			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
+			additionalHeadersStr := strings.Join(additionalList, ";")
+			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+		} else {
+			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
+			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+		}
+	} else if conn.config.AuthVersion == AuthV2 {
+		additionalList, _ := conn.getAdditionalHeaderKeys(req)
+		if len(additionalList) > 0 {
+			authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
+			additionalHeadersStr := strings.Join(additionalList, ";")
+			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+		} else {
+			authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
+			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
+		}
+	} else {
+		// Get the final authorization string
+		authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
+	}
+
+	// Set the Authorization header
+	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
+}
+
+func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
+	// Collect the x-oss- prefixed headers from the request
+	ossHeadersMap := make(map[string]string)
+	additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
+	for k, v := range req.Header {
+		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
+			ossHeadersMap[strings.ToLower(k)] = v[0]
+		} else if conn.config.AuthVersion == AuthV2 {
+			if _, ok := additionalMap[strings.ToLower(k)]; ok {
+				ossHeadersMap[strings.ToLower(k)] = v[0]
+			}
+		}
+	}
+	hs := newHeaderSorter(ossHeadersMap)
+
+	// Sort ossHeadersMap in ascending order
+	hs.Sort()
+
+	// Get the canonicalizedOSSHeaders
+	canonicalizedOSSHeaders := ""
+	for i := range hs.Keys {
+		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
+	}
+
+	// Gather the remaining values. When signing a URL, the date is the expiration time.
+	date := req.Header.Get(HTTPHeaderDate)
+	contentType := req.Header.Get(HTTPHeaderContentType)
+	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+
+	// default is v1 signature
+	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
+	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
+
+	// v2 signature
+	if conn.config.AuthVersion == AuthV2 {
+		signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
+		h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
+	}
+
+	if conn.config.LogLevel >= Debug {
+		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
+	}
+
+	io.WriteString(h, signStr)
+	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+	return signedStr
+}
+
+func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string) string {
+	// Collect the x-oss- prefixed headers from the request
+	ossHeadersMap := make(map[string]string)
+	additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
+	for k, v := range req.Header {
+		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
+			ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
+		} else {
+			if _, ok := additionalMap[strings.ToLower(k)]; ok {
+				ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
+			}
+		}
+	}
+
+	// Required parameters
+	signDate := ""
+	dateFormat := ""
+	date := req.Header.Get(HTTPHeaderDate)
+	if date != "" {
+		signDate = date
+		dateFormat = http.TimeFormat
+	}
+
+	ossDate := req.Header.Get(HttpHeaderOssDate)
+	_, ok := ossHeadersMap[strings.ToLower(HttpHeaderOssDate)]
+	if ossDate != "" {
+		signDate = ossDate
+		dateFormat = iso8601DateFormatSecond
+		if !ok {
+			ossHeadersMap[strings.ToLower(HttpHeaderOssDate)] = strings.Trim(ossDate, " ")
+		}
+	}
+
+	contentType := req.Header.Get(HTTPHeaderContentType)
+	_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentType)]
+	if contentType != "" && !ok {
+		ossHeadersMap[strings.ToLower(HTTPHeaderContentType)] = strings.Trim(contentType, " ")
+	}
+
+	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+	_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)]
+	if contentMd5 != "" && !ok {
+		ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)] = strings.Trim(contentMd5, " ")
+	}
+
+	hs := newHeaderSorter(ossHeadersMap)
+
+	// Sort ossHeadersMap in ascending order
+	hs.Sort()
+
+	// Get the canonicalizedOSSHeaders
+	canonicalizedOSSHeaders := ""
+	for i := range hs.Keys {
+		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
+	}
+
+	signStr := ""
+
+	// v4 signature
+	hashedPayload := req.Header.Get(HttpHeaderOssContentSha256)
+
+	// subResource
+	resource := canonicalizedResource
+	subResource := ""
+	subPos := strings.LastIndex(canonicalizedResource, "?")
+	if subPos != -1 {
+		subResource = canonicalizedResource[subPos+1:]
+		resource = canonicalizedResource[0:subPos]
+	}
+
+	// get the canonical request
+	canonicalRequest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
+	rh := sha256.New()
+	io.WriteString(rh, canonicalRequest)
+	hashedRequest := hex.EncodeToString(rh.Sum(nil))
+
+	if conn.config.LogLevel >= Debug {
+		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(canonicalRequest))
+	}
+
+	// get the day, e.g. 20210914
+	t, _ := time.Parse(dateFormat, signDate)
+	strDay := t.Format("20060102")
+
+	signedStrV4Product := conn.config.GetSignProduct()
+	signedStrV4Region := conn.config.GetSignRegion()
+
+	signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
+	if conn.config.LogLevel >= Debug {
+		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
+	}
+
+	h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
+	io.WriteString(h1, strDay)
+	h1Key := h1.Sum(nil)
+
+	h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
+	io.WriteString(h2, signedStrV4Region)
+	h2Key := h2.Sum(nil)
+
+	h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
+	io.WriteString(h3, signedStrV4Product)
+	h3Key := h3.Sum(nil)
+
+	h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
+	io.WriteString(h4, "aliyun_v4_request")
+	h4Key := h4.Sum(nil)
+
+	h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
+	io.WriteString(h, signStr)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
+	if params[HTTPParamAccessKeyID] == nil {
+		return ""
+	}
+
+	canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
+	canonParamsKeys := []string{}
+	for key := range params {
+		if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
+			canonParamsKeys = append(canonParamsKeys, key)
+		}
+	}
+
+	sort.Strings(canonParamsKeys)
+	canonParamsStr := ""
+	for _, key := range canonParamsKeys {
+		canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
+	}
+
+	expireStr := strconv.FormatInt(expiration, 10)
+	signStr := expireStr + "\n" + canonParamsStr + canonResource
+
+	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
+	io.WriteString(h, signStr)
+	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
+	return signedStr
+}
+
+// newHeaderSorter is an additional function for function SignHeader.
+func newHeaderSorter(m map[string]string) *headerSorter {
+	hs := &headerSorter{
+		Keys: make([]string, 0, len(m)),
+		Vals: make([]string, 0, len(m)),
+	}
+
+	for k, v := range m {
+		hs.Keys = append(hs.Keys, k)
+		hs.Vals = append(hs.Vals, v)
+	}
+	return hs
+}
+
+// Sort is an additional function for function SignHeader.
+func (hs *headerSorter) Sort() {
+	sort.Sort(hs)
+}
+
+// Len is an additional function for function SignHeader.
+func (hs *headerSorter) Len() int {
+	return len(hs.Vals)
+}
+
+// Less is an additional function for function SignHeader.
+func (hs *headerSorter) Less(i, j int) bool {
+	return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
+}
+
+// Swap is an additional function for function SignHeader.
+func (hs *headerSorter) Swap(i, j int) {
+	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
+	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
new file mode 100644
index 0000000..4eb2f44
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
@@ -0,0 +1,1297 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Bucket implements the operations on objects.
+type Bucket struct {
+	Client     Client
+	BucketName string
+}
+
+// PutObject creates a new object; it overwrites the original one if it already exists.
+//
+// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\".
+// reader io.Reader instance for reading the data to upload.
+// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
+	opts := AddContentType(options, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// PutObjectFromFile creates a new object from the local file.
+//
+// objectKey object key.
+// filePath the local file path to upload.
+// options the options for uploading the object. Refer to the parameter options in PutObject for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	opts := AddContentType(options, filePath, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    fd,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// DoPutObject does the actual upload work.
+//
+// request the request instance for uploading an object.
+// options the options for uploading an object.
+//
+// Response the response from OSS.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
+	isOptSet, _, _ := IsOptionSet(options, HTTPHeaderContentType)
+	if !isOptSet {
+		options = AddContentType(options, request.ObjectKey)
+	}
+
+	listener := GetProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.GetConfig().IsEnableCRC {
+		err = CheckCRC(resp, "DoPutObject")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+// GetObject downloads the object.
+//
+// objectKey the object key.
+// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
+// IfNoneMatch, AcceptEncoding. For more details, please check out:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser the reader instance for reading data from the response. It must be closed after use and is only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.Response, nil
+}
+
+// GetObjectToFile downloads the data to a local file.
+//
+// objectKey the object key to download.
+// filePath the local file to store the object data.
+// options the options for downloading the object. Refer to the parameter options in method GetObject for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
+	tempFilePath := filePath + TempFileSuffix
+
+	// Calls the API to actually download the object. Returns the result instance.
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Close()
+
+	// If the local file does not exist, create a new one. If it exists, overwrite it.
+	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+	if err != nil {
+		return err
+	}
+
+	// Copy the data to the local file path.
+	_, err = io.Copy(fd, result.Response.Body)
+	fd.Close()
+	if err != nil {
+		return err
+	}
+
+	// Compares the CRC value
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil)
+	acceptEncoding := ""
+	if encodeOpt != nil {
+		acceptEncoding = encodeOpt.(string)
+	}
+	if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = CheckCRC(result.Response, "GetObjectToFile")
+		if err != nil {
+			os.Remove(tempFilePath)
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs.
+//
+// request the request to download the object.
+// options the options for downloading the file. Check out the parameter options in method GetObject.
+//
+// GetObjectResult the result instance of getting the object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
+	params, _ := GetRawParams(options)
+	resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	// CRC
+	var crcCalc hash.Hash64
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	if bucket.GetConfig().IsEnableCRC && !hasRange {
+		crcCalc = crc64.New(CrcTable())
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	// Progress
+	listener := GetProgressListener(options)
+
+	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
+
+	return result, nil
+}
+
+// CopyObject copies the object inside the bucket.
+//
+// srcObjectKey the source object to copy.
+// destObjectKey the target object to copy to.
+// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
+// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
+// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
+// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+
+	// first find the version id
+	versionIdKey := "versionId"
+	versionId, _ := FindOption(options, versionIdKey, nil)
+	if versionId == nil {
+		options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	} else {
+		options = DeleteOption(options, versionIdKey)
+		options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string)))
+	}
+
+	params := map[string]interface{}{}
+	resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// CopyObjectTo copies the object to another bucket.
+//
+// srcObjectKey source object key. The source bucket is Bucket.BucketName.
+// destBucketName target bucket name.
+// destObjectKey target object name.
+// options copy options, check out parameter options in function CopyObject for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
+	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+//
+// CopyObjectFrom copies an object from another bucket into the current bucket.
+//
+// srcBucketName source bucket name.
+// srcObjectKey source object name.
+// destObjectKey target object name. The target bucket name is Bucket.BucketName.
+// options copy options. Check out parameter options in function CopyObject.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	destBucketName := bucket.BucketName
+	var out CopyObjectResult
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return out, err
+	}
+
+	return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
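
A typical round trip through the upload and download entry points above might look like the following sketch; the endpoint, credentials, bucket and key names are placeholders, not values from this patch:

package main

import (
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "yourAccessKeyID", "yourAccessKeySecret")
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket("your-bucket")
	if err != nil {
		log.Fatal(err)
	}
	// Upload a local file, then download it back.
	if err := bucket.PutObjectFromFile("dir/hello.txt", "hello.txt"); err != nil {
		log.Fatal(err)
	}
	if err := bucket.GetObjectToFile("dir/hello.txt", "hello-copy.txt"); err != nil {
		log.Fatal(err)
	}
}
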
+func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+
+	// first find the version id
+	versionIdKey := "versionId"
+	versionId, _ := FindOption(options, versionIdKey, nil)
+	if versionId == nil {
+		options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	} else {
+		options = DeleteOption(options, versionIdKey)
+		options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string)))
+	}
+
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return out, err
+	}
+	params := map[string]interface{}{}
+	resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
+
+	// get response header
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil {
+		pRespHeader := respHeader.(*http.Header)
+		if resp != nil {
+			*pRespHeader = resp.Headers
+		}
+	}
+
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// AppendObject uploads the data by appending to an existing or new object.
+//
+// AppendObject the parameter appendPosition specifies which position (in the target object) to append at. For the first append (to a non-existing file),
+// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length.
+// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes of data, so the second call's position is 65536.
+// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller does not need to maintain this information).
+//
+// objectKey the target object to append to.
+// reader io.Reader. The reader instance for reading the data to append.
+// appendPosition the start position to append at.
+// options the options for the first append, such as CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL.
+//
+// int64 the next append position, it's valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
+	request := &AppendObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+		Position:  appendPosition,
+	}
+
+	result, err := bucket.DoAppendObject(request, options)
+	if err != nil {
+		return appendPosition, err
+	}
+
+	return result.NextPosition, err
+}
+
+// DoAppendObject is the actual API that does the object append.
+//
+// request the request object for appending the object.
+// options the options for appending the object.
+//
+// AppendObjectResult the result object for appending the object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
+	params := map[string]interface{}{}
+	params["append"] = nil
+	params["position"] = strconv.FormatInt(request.Position, 10)
+	headers := make(map[string]string)
+
+	opts := AddContentType(options, request.ObjectKey)
+
+	var initCRC uint64
+	isCRCSet, initCRCOpt, _ := IsOptionSet(options, initCRC64)
+	if isCRCSet {
+		initCRC = initCRCOpt.(uint64)
+	}
+
+	listener := GetProgressListener(options)
+
+	handleOptions(headers, opts)
+	resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
+		request.Reader, initCRC, listener)
+
+	// get response header
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil {
+		pRespHeader := respHeader.(*http.Header)
+		if resp != nil {
+			*pRespHeader = resp.Headers
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
+	result := &AppendObjectResult{
+		NextPosition: nextPosition,
+		CRC:          resp.ServerCRC,
+	}
+
+	if bucket.GetConfig().IsEnableCRC && isCRCSet {
+		err = CheckCRC(resp, "AppendObject")
+		if err != nil {
+			return result, err
+		}
+	}
+
+	return result, nil
+}
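
A minimal sketch of the append-position protocol described above (the key name is a placeholder, and the bucket is assumed to come from client.Bucket as in the earlier sketch):

package ossexamples

import (
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// AppendTwice appends twice to the same object: the first append to a new
// object starts at position 0, and each call returns the next position.
func AppendTwice(bucket *oss.Bucket) error {
	next, err := bucket.AppendObject("append.log", strings.NewReader("hello "), 0)
	if err != nil {
		return err
	}
	_, err = bucket.AppendObject("append.log", strings.NewReader("world"), next)
	return err
}
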
+// DeleteObject deletes the object.
+//
+// objectKey the object key to delete.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DeleteObject(objectKey string, options ...Option) error {
+	params, _ := GetRawParams(options)
+	resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// DeleteObjects deletes multiple objects.
+//
+// objectKeys the object keys to delete.
+// options the options for deleting objects.
+// The supported option is DeleteObjectsQuiet, which means it will not return an error even if the deletion failed (not recommended). By default it's not used.
+//
+// DeleteObjectsResult the result object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
+	out := DeleteObjectsResult{}
+	dxml := deleteXML{}
+	for _, key := range objectKeys {
+		dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
+	}
+
+	isQuiet, _ := FindOption(options, deleteObjectsQuiet, false)
+	dxml.Quiet = isQuiet.(bool)
+
+	bs, err := xml.Marshal(dxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+	sum := md5.Sum(bs)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	options = append(options, ContentMD5(b64))
+
+	params := map[string]interface{}{}
+	params["delete"] = nil
+	params["encoding-type"] = "url"
+
+	resp, err := bucket.do("POST", "", params, options, buffer, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	deletedResult := DeleteObjectVersionsResult{}
+	if !dxml.Quiet {
+		if err = xmlUnmarshal(resp.Body, &deletedResult); err == nil {
+			err = decodeDeleteObjectsResult(&deletedResult)
+		}
+	}
+
+	// Keep compatibility: convert to struct DeleteObjectsResult
+	out.XMLName = deletedResult.XMLName
+	for _, v := range deletedResult.DeletedObjectsDetail {
+		out.DeletedObjects = append(out.DeletedObjects, v.Key)
+	}
+
+	return out, err
+}
+
+// DeleteObjectVersions deletes multiple object versions.
+//
+// objectVersions the object keys and versions to delete.
+// options the options for deleting objects.
+// The supported option is DeleteObjectsQuiet, which means it will not return an error even if the deletion failed (not recommended). By default it's not used.
+//
+// DeleteObjectVersionsResult the result object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DeleteObjectVersions(objectVersions []DeleteObject, options ...Option) (DeleteObjectVersionsResult, error) {
+	out := DeleteObjectVersionsResult{}
+	dxml := deleteXML{}
+	dxml.Objects = objectVersions
+
+	isQuiet, _ := FindOption(options, deleteObjectsQuiet, false)
+	dxml.Quiet = isQuiet.(bool)
+
+	bs, err := xml.Marshal(dxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+	sum := md5.Sum(bs)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	options = append(options, ContentMD5(b64))
+
+	params := map[string]interface{}{}
+	params["delete"] = nil
+	params["encoding-type"] = "url"
+
+	resp, err := bucket.do("POST", "", params, options, buffer, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	if !dxml.Quiet {
+		if err = xmlUnmarshal(resp.Body, &out); err == nil {
+			err = decodeDeleteObjectsResult(&out)
+		}
+	}
+	return out, err
+}
+
+// IsObjectExist checks if the object exists.
+//
+// bool flag of the object's existence (true: exists; false: does not exist) when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) IsObjectExist(objectKey string, options ...Option) (bool, error) {
+	_, err := bucket.GetObjectMeta(objectKey, options...)
+	if err == nil {
+		return true, nil
+	}
+
+	switch err.(type) {
+	case ServiceError:
+		if err.(ServiceError).StatusCode == 404 {
+			return false, nil
+		}
+	}
+
+	return false, err
+}
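
A short sketch combining the existence check with a batch delete (keys are placeholders; the bucket is assumed to come from client.Bucket as before):

package ossexamples

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// DeleteIfPresent checks for one key, then batch-deletes two objects and
// prints the keys the service reports as deleted.
func DeleteIfPresent(bucket *oss.Bucket) error {
	exists, err := bucket.IsObjectExist("dir/hello.txt")
	if err != nil || !exists {
		return err
	}
	res, err := bucket.DeleteObjects([]string{"dir/hello.txt", "dir/world.txt"})
	if err != nil {
		return err
	}
	fmt.Println("deleted:", res.DeletedObjects)
	return nil
}
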
+// ListObjects lists the objects under the current bucket.
+//
+// options it contains all the filters for listing objects.
+// It could specify a prefix filter on object keys, the max keys count to return, the object key marker and the delimiter for grouping object names.
+// The key marker means the returned objects' keys must be greater than it in lexicographic order.
+//
+// For example, if the bucket has 8 objects: my-object-1, my-object-11, my-object-2, my-object-21,
+// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
+// the three objects my-object-2, my-object-21, my-object-22. If the marker is my-object-22 (no other filters), then it returns
+// the three objects my-object-3, my-object-31, my-object-32. If max keys is 5, then it returns 5 objects.
+// The three filters could be used together to achieve filtering and paging functionality.
+// If the prefix is a folder name, then it could list all files under this folder (including the files under its subfolders).
+// But if the delimiter is specified as '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
+// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
+// But if the delimiter is '/', then only "fun/test.jpg" is returned as a file and fun/movie/ is returned as a common prefix.
+//
+// For a common usage scenario, check out sample/list_object.go.
+//
+// ListObjectsResult the return value after the operation succeeds (only valid when error is nil).
+//
+func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
+	var out ListObjectsResult
+
+	options = append(options, EncodingType("url"))
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResult(&out)
+	return out, err
+}
+
+// ListObjectsV2 lists the objects under the current bucket. It is recommended to use ListObjectsV2 instead of ListObjects.
+// ListObjectsResultV2 the return value after the operation succeeds (only valid when error is nil).
+func (bucket Bucket) ListObjectsV2(options ...Option) (ListObjectsResultV2, error) {
+	var out ListObjectsResultV2
+
+	options = append(options, EncodingType("url"))
+	options = append(options, ListType(2))
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResultV2(&out)
+	return out, err
+}
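
A sketch of the marker-based paging protocol described in the ListObjects comment above (prefix and page size are placeholders; the bucket is assumed to come from client.Bucket as before):

package ossexamples

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// ListAll pages through all keys under a prefix, 100 at a time, following
// NextMarker until IsTruncated is false.
func ListAll(bucket *oss.Bucket) error {
	marker := ""
	for {
		res, err := bucket.ListObjects(oss.Prefix("fun/"), oss.Marker(marker), oss.MaxKeys(100))
		if err != nil {
			return err
		}
		for _, obj := range res.Objects {
			fmt.Println(obj.Key)
		}
		if !res.IsTruncated {
			return nil
		}
		marker = res.NextMarker
	}
}
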
+// ListObjectVersions lists objects of all versions under the current bucket.
+func (bucket Bucket) ListObjectVersions(options ...Option) (ListObjectVersionsResult, error) {
+	var out ListObjectVersionsResult
+
+	options = append(options, EncodingType("url"))
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+	params["versions"] = nil
+
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectVersionsResult(&out)
+	return out, err
+}
+
+// SetObjectMeta sets the metadata of the object.
+//
+// objectKey object key.
+// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
+// ServerSideEncryption, and custom metadata.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
+	options = append(options, MetadataDirective(MetaReplace))
+	_, err := bucket.CopyObject(objectKey, objectKey, options...)
+	return err
+}
+
+// GetObjectDetailedMeta gets the object's detailed metadata.
+//
+// objectKey object key.
+// options the constraints on the object. The metadata is returned only when the object meets the constraints; otherwise an error is returned. Valid options are IfModifiedSince, IfUnmodifiedSince,
+// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+//
+// http.Header the object meta when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
+	params, _ := GetRawParams(options)
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+// GetObjectMeta gets object metadata.
+//
+// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag,
+// size, LastModified. The size information is in the HTTP header Content-Length.
+//
+// objectKey object key
+//
+// http.Header the object's metadata, valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
+	params, _ := GetRawParams(options)
+	params["objectMeta"] = nil
+	//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+// SetObjectACL updates the object's ACL.
+//
+// Only the bucket's owner can update an object's ACL, which takes priority over the bucket's ACL.
+// For example, if the bucket ACL is private and the object's ACL is public-read-write,
+// then the object's ACL is used, which means all users can read or write that object.
+// When the object's ACL is not set, the bucket's ACL is used as the object's ACL.
+//
+// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
+// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
+// CompleteMultipartUpload and CopyObject on the target object.
+//
+// objectKey the target object key (to set the ACL on).
+// objectACL object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType, options ...Option) error {
+	options = append(options, ObjectACL(objectACL))
+	params, _ := GetRawParams(options)
+	params["acl"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetObjectACL gets the object's ACL.
+//
+// objectKey the object to get the ACL from.
+//
+// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectACL(objectKey string, options ...Option) (GetObjectACLResult, error) {
+	var out GetObjectACLResult
+	params, _ := GetRawParams(options)
+	params["acl"] = nil
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// PutSymlink creates a symlink (to point to an existing object).
+//
+// Symlink cannot point to another symlink.
+// When creating a symlink, it does not check the existence of the target file, and does not check if the target file is a symlink.
+// Nor does it check the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
+// If trying to add an existing file, as long as the caller has the write permission, the existing one will be overwritten.
+// If the x-oss-meta- is specified, it will be added as the metadata of the symlink file.
+//
+// symObjectKey the symlink object's key.
+// targetObjectKey the target object key to point to.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+	params, _ := GetRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetSymlink gets the symlink object with the specified key.
+// If the symlink object does not exist, returns 404.
+//
+// objectKey the symlink object's key.
+//
+// error it's nil if no error, otherwise it's an error object.
+// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
+//
+func (bucket Bucket) GetSymlink(objectKey string, options ...Option) (http.Header, error) {
+	params, _ := GetRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+	targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+	if err != nil {
+		return resp.Headers, err
+	}
+	resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+	return resp.Headers, err
+}
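
A sketch of replacing custom metadata and reading it back via the HEAD-based API above (key and meta values are placeholders; the bucket is assumed to come from client.Bucket as before):

package ossexamples

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// ReplaceAndInspectMeta sets a custom x-oss-meta- header (internally a
// CopyObject onto itself with MetadataDirective REPLACE) and reads it back.
func ReplaceAndInspectMeta(bucket *oss.Bucket) error {
	if err := bucket.SetObjectMeta("dir/hello.txt", oss.Meta("author", "alice")); err != nil {
		return err
	}
	headers, err := bucket.GetObjectDetailedMeta("dir/hello.txt")
	if err != nil {
		return err
	}
	fmt.Println(headers.Get("x-oss-meta-author")) // http.Header.Get is case-insensitive
	return nil
}
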
+// RestoreObject restores the object from the archive storage.
+//
+// An archive object is in cold status by default and it cannot be accessed.
+// When restore is called on a cold object, it becomes available for access after some time.
+// If multiple restores are called on the same file while the object is being restored, the server does nothing for the additional calls but returns success.
+// By default, the restored object is available for access for one day. After that it becomes unavailable again.
+// But if RestoreObject is called again after the file is restored, it extends the object's access time by one more day, up to 7 days in total.
+//
+// objectKey object key to restore.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) RestoreObject(objectKey string, options ...Option) error {
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+	resp, err := bucket.do("POST", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// RestoreObjectDetail supports more features than RestoreObject.
+func (bucket Bucket) RestoreObjectDetail(objectKey string, restoreConfig RestoreConfiguration, options ...Option) error {
+	if restoreConfig.Tier == "" {
+		// Expedited, Standard, Bulk
+		restoreConfig.Tier = string(RestoreStandard)
+	}
+
+	if restoreConfig.Days == 0 {
+		restoreConfig.Days = 1
+	}
+
+	bs, err := xml.Marshal(restoreConfig)
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+
+	resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// RestoreObjectXML supports more features than RestoreObject.
+func (bucket Bucket) RestoreObjectXML(objectKey, configXML string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(configXML))
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+
+	resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// SignURL signs the URL. Users can access the object directly with this URL without getting the AK.
+//
+// objectKey the target object to sign.
+// method the HTTP method to allow on the signed URL.
+// expiredInSec the URL's lifetime in seconds.
+//
+// string returns the signed URL, when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+	if expiredInSec < 0 {
+		return "", fmt.Errorf("invalid expires: %d, expires must be bigger than 0", expiredInSec)
+	}
+	expiration := time.Now().Unix() + expiredInSec
+
+	params, err := GetRawParams(options)
+	if err != nil {
+		return "", err
+	}
+
+	headers := make(map[string]string)
+	err = handleOptions(headers, options)
+	if err != nil {
+		return "", err
+	}
+
+	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
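
A one-function sketch of generating a presigned download URL with SignURL above (key and lifetime are placeholders; the bucket is assumed to come from client.Bucket as before):

package ossexamples

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// ShareForTenMinutes returns a presigned GET URL valid for 600 seconds;
// whoever holds the URL can download the object without the AccessKey pair.
func ShareForTenMinutes(bucket *oss.Bucket) (string, error) {
	return bucket.SignURL("dir/hello.txt", oss.HTTPGet, 600)
}
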
+// PutObjectWithURL uploads an object with the signed URL. If the object exists, it will be overwritten.
+// PutObjectWithURL does not derive the mimetype from the key name.
+//
+// signedURL signed URL.
+// reader io.Reader the reader instance for reading the data for the upload.
+// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
+// PutObjectFromFileWithURL does not derive the mimetype from the object key's name or the local file name.
+//
+// signedURL the signed URL.
+// filePath the local file path to upload.
+// options options for uploading, same as the options in the PutObject function.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// DoPutObjectWithURL is the actual API that does the upload-with-URL work (internal to the SDK).
+//
+// signedURL the signed URL.
+// reader io.Reader the reader instance for getting the data to upload.
+// options options for uploading.
+//
+// Response the response object which contains the HTTP response.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+	listener := GetProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.GetConfig().IsEnableCRC {
+		err = CheckCRC(resp, "DoPutObjectWithURL")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+// GetObjectWithURL downloads the object with the signed URL and returns the reader instance.
+//
+// signedURL the signed URL.
+// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
+// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser the reader object for getting the data from the response. It needs to be closed after use. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return nil, err
+	}
+	return result.Response, nil
+}
+// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
+//
+// signedURL the signed URL.
+// filePath the local file path to download to.
+// options the options for downloading the object. Check out the parameter options in function GetObject for reference.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
+	tempFilePath := filePath + TempFileSuffix
+
+	// Get the object's content
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Close()
+
+	// If the file does not exist, create one. If it exists, overwrite it.
+	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+	if err != nil {
+		return err
+	}
+
+	// Save the data to the file.
+	_, err = io.Copy(fd, result.Response.Body)
+	fd.Close()
+	if err != nil {
+		return err
+	}
+
+	// Compare the CRC values. If the CRC values do not match, return an error.
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil)
+	acceptEncoding := ""
+	if encodeOpt != nil {
+		acceptEncoding = encodeOpt.(string)
+	}
+
+	if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = CheckCRC(result.Response, "GetObjectToFileWithURL")
+		if err != nil {
+			os.Remove(tempFilePath)
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
+//
+// signedURL the signed URL.
+// options the options for getting the object. Check out parameter options in GetObject for reference.
+//
+// GetObjectResult the result object when the error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
+	params, _ := GetRawParams(options)
+	resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	// CRC
+	var crcCalc hash.Hash64
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	if bucket.GetConfig().IsEnableCRC && !hasRange {
+		crcCalc = crc64.New(CrcTable())
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	// Progress
+	listener := GetProgressListener(options)
+
+	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
+
+	return result, nil
+}
+
+//
+// ProcessObject applies a process to the specified image file.
+//
+// The supported processes include resize, rotate, crop, watermark, format,
+// udf, customized style, etc.
+//
+// objectKey object key to process.
+// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) ProcessObject(objectKey string, process string, options ...Option) (ProcessObjectResult, error) {
+ var out ProcessObjectResult
+ params, _ := GetRawParams(options)
+ params["x-oss-process"] = nil
+ processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
+ data := strings.NewReader(processData)
+ resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = jsonUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// PutObjectTagging adds tagging to the object.
+//
+// objectKey the object key to add tagging to.
+// tagging the tagging to be added.
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) PutObjectTagging(objectKey string, tagging Tagging, options ...Option) error {
+ bs, err := xml.Marshal(tagging)
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+ resp, err := bucket.do("PUT", objectKey, params, options, buffer, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// GetObjectTagging gets the tagging of the object.
+//
+// objectKey the object key to get tagging for.
+//
+// Tagging
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetObjectTagging(objectKey string, options ...Option) (GetObjectTaggingResult, error) {
+ var out GetObjectTaggingResult
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+
+ resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteObjectTagging deletes the object's tagging.
+//
+// objectKey the object key to delete tagging from.
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) DeleteObjectTagging(objectKey string, options ...Option) error {
+ params, _ := GetRawParams(options)
+ params["tagging"] = nil
+
+ if objectKey == "" {
+ return fmt.Errorf("invalid argument: object name is empty")
+ }
+
+ resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+func (bucket Bucket) OptionsMethod(objectKey string, options ...Option) (http.Header, error) {
+ var out http.Header
+ resp, err := bucket.do("OPTIONS", objectKey, nil, options, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ out = resp.Headers
+ return out, nil
+}
+
+// Do is the exported wrapper of the private do method.
+func (bucket Bucket) Do(method, objectName string, params map[string]interface{}, options []Option,
+ data io.Reader, listener ProgressListener) (*Response, error) {
+ return bucket.do(method, objectName, params, options, data, listener)
+}
+
+// do sends the request and is shared by the object-level APIs above.
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+ data io.Reader, listener ProgressListener) (*Response, error) {
+ headers := make(map[string]string)
+ err := handleOptions(headers, options)
+ if err != nil {
+ return nil, err
+ }
+
+ err = CheckBucketName(bucket.BucketName)
+ if len(bucket.BucketName) > 0 && err != nil {
+ return nil, err
+ }
+
+ resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+ params, headers, data, 0, listener)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
+ return resp, err
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+ data io.Reader, listener ProgressListener) (*Response, error) {
+ headers := make(map[string]string)
+ err := handleOptions(headers, options)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+
+ // get response header
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ pRespHeader := respHeader.(*http.Header)
+ if resp != nil {
+ *pRespHeader = resp.Headers
+ }
+ }
+
+ return resp, err
+}
+
+func (bucket Bucket) GetConfig() *Config {
+ return bucket.Client.Config
+}
+
+func AddContentType(options []Option, keys ...string) []Option {
+ typ := TypeByExtension("")
+ for _, key := range keys {
+ typ = TypeByExtension(key)
+ if typ != "" {
+ break
+ }
+ }
+
+ if typ == "" {
+ typ = "application/octet-stream"
+ }
+
+ opts := []Option{ContentType(typ)}
+ opts = append(opts, options...)
+
+ return opts
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
new file mode 100644
index 0000000..443db98
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -0,0 +1,2366 @@
+// Package oss implements functions for accessing the OSS service.
+// It has two main structs: Client and Bucket.
+package oss
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+)
+
+// Client is the SDK's entry point. It handles bucket-related operations such as creating/deleting a bucket
+// and setting/getting bucket properties (ACL/lifecycle/referer/logging/website).
+// Object-related operations are done by the Bucket struct.
+// Use oss.New to create a Client instance.
+//
+type (
+ // Client OSS client
+ Client struct {
+ Config *Config // OSS client configuration
+ Conn *Conn // Send HTTP request
+ HTTPClient *http.Client // http.Client to use; if nil, one is created internally
+ }
+
+ // ClientOption client option such as UseCname, Timeout, SecurityToken.
+ ClientOption func(*Client)
+)
+
+// New creates a new client.
+//
+// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com .
+// accessKeyID the access key ID.
+// accessKeySecret the access key secret.
+//
+// Client the new client instance; the returned value is valid only when error is nil.
+// error it's nil if no error, otherwise it's an error object.
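+//
+// A minimal usage sketch added for illustration (endpoint and credentials are
+// placeholders):
+//
+//   client, err := oss.New("http://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = client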
+//
+func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
+ // Configuration
+ config := getDefaultOssConfig()
+ config.Endpoint = endpoint
+ config.AccessKeyID = accessKeyID
+ config.AccessKeySecret = accessKeySecret
+
+ // URL parse
+ url := &urlMaker{}
+ err := url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
+ if err != nil {
+ return nil, err
+ }
+
+ // HTTP connect
+ conn := &Conn{config: config, url: url}
+
+ // OSS client
+ client := &Client{
+ Config: config,
+ Conn: conn,
+ }
+
+ // Client options parse
+ for _, option := range options {
+ option(client)
+ }
+
+ if config.AuthVersion != AuthV1 && config.AuthVersion != AuthV2 && config.AuthVersion != AuthV4 {
+ return nil, fmt.Errorf("Init client Error, invalid Auth version: %v", config.AuthVersion)
+ }
+
+ // Create HTTP connection
+ err = conn.init(config, url, client.HTTPClient)
+
+ return client, err
+}
+
+// SetRegion sets the region for the client.
+//
+// region the region, such as cn-hangzhou
+func (client *Client) SetRegion(region string) {
+ client.Config.Region = region
+}
+
+// SetCloudBoxId sets the cloud box ID for the client.
+//
+// cloudBoxId the ID of the cloud box
+func (client *Client) SetCloudBoxId(cloudBoxId string) {
+ client.Config.CloudBoxId = cloudBoxId
+}
+
+// SetProduct sets the product type for the client.
+//
+// product the product type
+func (client *Client) SetProduct(product string) {
+ client.Config.Product = product
+}
+
+// Bucket gets the bucket instance.
+//
+// bucketName the bucket name.
+// Bucket the bucket object, when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) Bucket(bucketName string) (*Bucket, error) {
+ err := CheckBucketName(bucketName)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Bucket{
+ client,
+ bucketName,
+ }, nil
+}
+
+// CreateBucket creates a bucket.
+//
+// bucketName the bucket name, it's globally unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-').
+// It must start with a lowercase letter or a number and the length can only be between 3 and 255.
+// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate.
+// It could also be specified with the StorageClass option, which supports StorageStandard, StorageIA (infrequent access), and StorageArchive.
+//
+// error it's nil if no error, otherwise it's an error object.
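+//
+// A minimal usage sketch added for illustration (the bucket name is a placeholder;
+// ACL and StorageClass are options defined elsewhere in this package):
+//
+//   err := client.CreateBucket("my-bucket", oss.ACL(oss.ACLPrivate), oss.StorageClass(oss.StorageIA))
+//   if err != nil {
+//       // handle error
+//   }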
+//
+func (client Client) CreateBucket(bucketName string, options ...Option) error {
+ headers := make(map[string]string)
+ handleOptions(headers, options)
+
+ buffer := new(bytes.Buffer)
+
+ var cbConfig createBucketConfiguration
+ cbConfig.StorageClass = StorageStandard
+
+ isStorageSet, valStorage, _ := IsOptionSet(options, storageClass)
+ isRedundancySet, valRedundancy, _ := IsOptionSet(options, redundancyType)
+ isObjectHashFuncSet, valHashFunc, _ := IsOptionSet(options, objectHashFunc)
+ if isStorageSet {
+ cbConfig.StorageClass = valStorage.(StorageClassType)
+ }
+
+ if isRedundancySet {
+ cbConfig.DataRedundancyType = valRedundancy.(DataRedundancyType)
+ }
+
+ if isObjectHashFuncSet {
+ cbConfig.ObjectHashFunction = valHashFunc.(ObjecthashFuncType)
+ }
+
+ bs, err := xml.Marshal(cbConfig)
+ if err != nil {
+ return err
+ }
+ buffer.Write(bs)
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// CreateBucketXml creates a bucket from the given XML configuration body.
+func (client Client) CreateBucketXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
+//
+// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
+// Marker makes sure the returned buckets' names are greater than it in lexicographic order.
+// MaxKeys limits the number of keys returned; the default is 100 and the maximum is 1000.
+// For the common usage scenario, please check out list_bucket.go in the sample.
+// ListBucketsResponse the response object if error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
+ var out ListBucketsResult
+
+ params, err := GetRawParams(options)
+ if err != nil {
+ return out, err
+ }
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// ListCloudBoxes lists cloud boxes of the current account under the given endpoint, with optional filters.
+//
+// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the cloud box name's prefix filter.
+// Marker makes sure the returned cloud boxes' names are greater than it in lexicographic order.
+// MaxKeys limits the number of keys returned; the default is 100 and the maximum is 1000.
+// ListCloudBoxResult the response object if error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
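+//
+// A minimal usage sketch added for illustration (the prefix is a placeholder):
+//
+//   result, err := client.ListCloudBoxes(oss.Prefix("box-"))
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = result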
+//
+func (client Client) ListCloudBoxes(options ...Option) (ListCloudBoxResult, error) {
+ var out ListCloudBoxResult
+
+ params, err := GetRawParams(options)
+ if err != nil {
+ return out, err
+ }
+
+ params["cloudboxes"] = nil
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// IsBucketExist checks if the bucket exists
+//
+// bucketName the bucket name.
+//
+// bool true if it exists, and it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) IsBucketExist(bucketName string) (bool, error) {
+ listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
+ if err != nil {
+ return false, err
+ }
+
+ if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
+ return true, nil
+ }
+ return false, nil
+}
+
+// DeleteBucket deletes the bucket. Only an empty bucket can be deleted (no objects and no parts).
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucket(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketLocation gets the bucket location.
+//
+// Check out the following link for more information:
+// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+//
+// bucketName the bucket name
+//
+// string the bucket's datacenter location
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLocation(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["location"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var locationConstraint string
+ err = xmlUnmarshal(resp.Body, &locationConstraint)
+ return locationConstraint, err
+}
+
+// SetBucketACL sets the bucket's ACL.
+//
+// bucketName the bucket name
+// bucketACL the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType, options ...Option) error {
+ headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
+ params := map[string]interface{}{}
+ params["acl"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketACL gets the bucket ACL.
+//
+// bucketName the bucket name.
+//
+// GetBucketAclResponse the result object, and it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketACL(bucketName string, options ...Option) (GetBucketACLResult, error) {
+ var out GetBucketACLResult
+ params := map[string]interface{}{}
+ params["acl"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketLifecycle sets the bucket's lifecycle.
+//
+// For more information, check out the following link:
+// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+//
+// bucketName the bucket name.
+// rules the lifecycle rules. There are two kinds of rules: absolute time expiration (specified as day/month/year) and relative time expiration (specified in days).
+// Check out sample/bucket_lifecycle.go for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule, options ...Option) error {
+ err := verifyLifecycleRules(rules)
+ if err != nil {
+ return err
+ }
+ lifecycleCfg := LifecycleConfiguration{Rules: rules}
+ bs, err := xml.Marshal(lifecycleCfg)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketLifecycleXml sets the bucket's lifecycle rule from an XML config.
+func (client Client) SetBucketLifecycleXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketLifecycle deletes the bucket's lifecycle.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketLifecycle(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketLifecycle gets the bucket's lifecycle settings.
+//
+// bucketName the bucket name.
+//
+// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLifecycle(bucketName string, options ...Option) (GetBucketLifecycleResult, error) {
+ var out GetBucketLifecycleResult
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+
+ // NonVersionTransition is deprecated; it is populated from
+ // NonVersionTransitions to keep backward compatibility.
+ for k, rule := range out.Rules {
+ if len(rule.NonVersionTransitions) > 0 {
+ out.Rules[k].NonVersionTransition = &(out.Rules[k].NonVersionTransitions[0])
+ }
+ }
+ return out, err
+}
+
+// GetBucketLifecycleXml gets the bucket's lifecycle settings as raw XML.
+func (client Client) GetBucketLifecycleXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// SetBucketReferer sets the bucket's referrer whitelist and the flag that allows empty referrers.
+//
+// To prevent hot-linking of OSS data, OSS supports the HTTP Referer header. A referrer whitelist, as well as
+// the allow-empty-referrer flag, can be set either via the API or the web console. Note that this applies to requests from web browsers only.
+// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from that domain can access the bucket.
+// For more information, please check out this link:
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+//
+// bucketName the bucket name.
+// referers the referrer whitelist. Each referrer supports one '*' and multiple '?' as wildcards.
+// A sample could be found in sample/bucket_referer.go.
+// allowEmptyReferer the flag of allowing empty referrer. By default it's true.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool, options ...Option) error {
+ rxml := RefererXML{}
+ rxml.AllowEmptyReferer = allowEmptyReferer
+ if referers == nil {
+ rxml.RefererList = append(rxml.RefererList, "")
+ } else {
+ for _, referer := range referers {
+ rxml.RefererList = append(rxml.RefererList, referer)
+ }
+ }
+
+ bs, err := xml.Marshal(rxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["referer"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketReferer gets the bucket's referrer whitelist.
+//
+// bucketName the bucket name.
+//
+// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReferer(bucketName string, options ...Option) (GetBucketRefererResult, error) {
+ var out GetBucketRefererResult
+ params := map[string]interface{}{}
+ params["referer"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketLogging sets the bucket logging settings.
+//
+// OSS can automatically store access logs. Only the bucket owner can enable logging.
+// Once enabled, OSS saves the access logs into hourly log files in the specified target bucket.
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+//
+// bucketName the bucket name to enable logging for.
+// targetBucket the target bucket name to store the log files.
+// targetPrefix the log files' prefix.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
+ isEnable bool, options ...Option) error {
+ var err error
+ var bs []byte
+ if isEnable {
+ lxml := LoggingXML{}
+ lxml.LoggingEnabled.TargetBucket = targetBucket
+ lxml.LoggingEnabled.TargetPrefix = targetPrefix
+ bs, err = xml.Marshal(lxml)
+ } else {
+ lxml := loggingXMLEmpty{}
+ bs, err = xml.Marshal(lxml)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketLogging deletes the logging configuration to disable logging on the bucket.
+//
+// bucketName the bucket name to disable logging on.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketLogging(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketLogging gets the bucket's logging settings
+//
+// bucketName the bucket name
+// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLogging(bucketName string, options ...Option) (GetBucketLoggingResult, error) {
+ var out GetBucketLoggingResult
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketWebsite sets the bucket's static website's index and error pages.
+//
+// OSS supports static website hosting for bucket data. Once enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable the static website for.
+// indexDocument the index page.
+// errorDocument the error page.
+//
+// error it's nil if no error, otherwise it's an error object.
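+//
+// A minimal usage sketch added for illustration (bucket and page names are
+// placeholders):
+//
+//   err := client.SetBucketWebsite("my-bucket", "index.html", "error.html")
+//   if err != nil {
+//       // handle error
+//   }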
+//
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string, options ...Option) error {
+ wxml := WebsiteXML{}
+ wxml.IndexDocument.Suffix = indexDocument
+ wxml.ErrorDocument.Key = errorDocument
+
+ bs, err := xml.Marshal(wxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketWebsiteDetail sets the bucket's full static website configuration.
+//
+// OSS supports static website hosting for bucket data. Once enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable the static website for.
+//
+// wxml the website configuration details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML, options ...Option) error {
+ bs, err := xml.Marshal(wxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketWebsiteXml sets the bucket's static website configuration from an XML string.
+//
+// OSS supports static website hosting for bucket data. Once enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable the static website for.
+//
+// webXml the website configuration in XML format.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketWebsiteXml(bucketName string, webXml string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(webXml))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketWebsite deletes the bucket's static website settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketWebsite(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketWebsite gets the bucket's default page (index page) and the error page.
+//
+// bucketName the bucket name
+//
+// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWebsite(bucketName string, options ...Option) (GetBucketWebsiteResult, error) {
+ var out GetBucketWebsiteResult
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketWebsiteXml gets the bucket's website configuration in XML format.
+//
+// bucketName the bucket name
+//
+// string the bucket's XML configuration; it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWebsiteXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+
+ out := string(body)
+ return out, err
+}
+
+// SetBucketCORS sets the bucket's CORS rules
+//
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName the bucket name
+// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule, options ...Option) error {
+ corsxml := CORSXML{}
+ for _, v := range corsRules {
+ cr := CORSRule{}
+ cr.AllowedMethod = v.AllowedMethod
+ cr.AllowedOrigin = v.AllowedOrigin
+ cr.AllowedHeader = v.AllowedHeader
+ cr.ExposeHeader = v.ExposeHeader
+ cr.MaxAgeSeconds = v.MaxAgeSeconds
+ corsxml.CORSRules = append(corsxml.CORSRules, cr)
+ }
+
+ bs, err := xml.Marshal(corsxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketCORSXml sets the bucket's CORS rules from an XML body.
+func (client Client) SetBucketCORSXml(bucketName string, xmlBody string, options ...Option) error {
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketCORS deletes the bucket's CORS settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
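+//
+// A minimal usage sketch added for illustration (the bucket name is a placeholder):
+//
+//   err := client.DeleteBucketCORS("my-bucket")
+//   if err != nil {
+//       // handle error
+//   }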
+//
+func (client Client) DeleteBucketCORS(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketCORS gets the bucket's CORS settings.
+//
+// bucketName the bucket name.
+// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketCORS(bucketName string, options ...Option) (GetBucketCORSResult, error) {
+ var out GetBucketCORSResult
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketCORSXml gets the bucket's CORS settings as raw XML.
+func (client Client) GetBucketCORSXml(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// GetBucketInfo gets the bucket information.
+//
+// bucketName the bucket name.
+// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketInfo(bucketName string, options ...Option) (GetBucketInfoResult, error) {
+ var out GetBucketInfoResult
+ params := map[string]interface{}{}
+ params["bucketInfo"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+
+ // convert "None" to ""
+ if err == nil {
+ if out.BucketInfo.SseRule.KMSMasterKeyID == "None" {
+ out.BucketInfo.SseRule.KMSMasterKeyID = ""
+ }
+
+ if out.BucketInfo.SseRule.SSEAlgorithm == "None" {
+ out.BucketInfo.SseRule.SSEAlgorithm = ""
+ }
+
+ if out.BucketInfo.SseRule.KMSDataEncryption == "None" {
+ out.BucketInfo.SseRule.KMSDataEncryption = ""
+ }
+ }
+ return out, err
+}
+
+// SetBucketVersioning sets the bucket's versioning status: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketVersioning(bucketName string, versioningConfig VersioningConfig, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(versioningConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["versioning"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketVersioning gets the bucket's versioning status: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
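+//
+// A minimal usage sketch added for illustration (the bucket name is a placeholder;
+// the Status field follows the VersioningConfig type defined elsewhere in this package):
+//
+//   result, err := client.GetBucketVersioning("my-bucket")
+//   if err == nil {
+//       fmt.Println(result.Status)
+//   }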
+func (client Client) GetBucketVersioning(bucketName string, options ...Option) (GetBucketVersioningResult, error) {
+ var out GetBucketVersioningResult
+ params := map[string]interface{}{}
+ params["versioning"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketEncryption sets the bucket's encryption config.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketEncryption(bucketName string, encryptionRule ServerEncryptionRule, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(encryptionRule)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketEncryption gets the bucket's encryption config.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketEncryption(bucketName string, options ...Option) (GetBucketEncryptionResult, error) {
+ var out GetBucketEncryptionResult
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketEncryption deletes the bucket's encryption config.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["encryption"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketTagging adds tagging to the bucket.
+// bucketName name of the bucket
+// tagging the tagging to be added
+// error nil if success, otherwise error
+func (client Client) SetBucketTagging(bucketName string, tagging Tagging, options ...Option) error {
+ var err error
+ var bs []byte
+ bs, err = xml.Marshal(tagging)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["tagging"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketTagging gets the tagging of the bucket.
+// bucketName name of the bucket
+// error nil if success, otherwise error
+func (client Client) GetBucketTagging(bucketName string, options ...Option) (GetBucketTaggingResult, error) {
+ var out GetBucketTaggingResult
+ params := map[string]interface{}{}
+ params["tagging"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketTagging deletes the bucket's tagging.
+// bucketName name of the bucket
+// error nil if success, otherwise error
+//
+func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["tagging"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketStat gets the bucket statistics.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketStat(bucketName string, options ...Option) (GetBucketStatResult, error) {
+ var out GetBucketStatResult
+ params := map[string]interface{}{}
+ params["stat"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketPolicy API operation for Object Storage Service.
+//
+// Get the policy from the bucket.
+//
+// bucketName the bucket name.
+//
+// string return the bucket's policy, and it's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketPolicy(bucketName string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+
+ out := string(body)
+ return out, err
+}
+
+// SetBucketPolicy API operation for Object Storage Service.
+//
+// Set the policy on the bucket.
+//
+// bucketName the bucket name.
+//
+// policy the bucket policy.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketPolicy(bucketName string, policy string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+
+ buffer := strings.NewReader(policy)
+
+ resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketPolicy API operation for Object Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["policy"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketRequestPayment API operation for Object Storage Service.
+//
+// Set the request payment configuration of the bucket.
+//
+// bucketName the bucket name.
+//
+// paymentConfig the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["requestPayment"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(paymentConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketRequestPayment API operation for Object Storage Service.
+//
+// Get the bucket's request payment configuration.
+//
+// bucketName the bucket name.
+//
+// RequestPaymentConfiguration the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) {
+ var out RequestPaymentConfiguration
+ params := map[string]interface{}{}
+ params["requestPayment"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetUserQoSInfo API operation for Object Storage Service.
+//
+// Get the user QoS.
+//
+// UserQoSConfiguration the user QoS and range information.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) {
+ var out UserQoSConfiguration
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("GET", "", params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketQoSInfo API operation for Object Storage Service.
+//
+// Set the bucket QoS information.
+//
+// bucketName the bucket name.
+//
+// qosConf the QoS configuration.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(qosConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketQosInfo API operation for Object Storage Service.
+//
+// Get the bucket QoS information.
+//
+// bucketName the bucket name.
+//
+// BucketQoSConfiguration the returned QoS configuration.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) {
+ var out BucketQoSConfiguration
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// DeleteBucketQosInfo API operation for Object Storage Service.
+//
+// Delete the bucket QoS information.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["qosInfo"] = nil
+
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketInventory API operation for Object Storage Service
+//
+// Set the bucket inventory.
+//
+// bucketName the bucket name.
+//
+// inventoryConfig the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) SetBucketInventory(bucketName string, inventoryConfig InventoryConfiguration, options ...Option) error {
+ params := map[string]interface{}{}
+ params["inventoryId"] = inventoryConfig.Id
+ params["inventory"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(inventoryConfig)
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketInventoryXml API operation for Object Storage Service
+//
+// Set the bucket inventory from an XML body.
+//
+// bucketName the bucket name.
+//
+// xmlBody the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) SetBucketInventoryXml(bucketName string, xmlBody string, options ...Option) error {
+ var inventoryConfig InventoryConfiguration
+ err := xml.Unmarshal([]byte(xmlBody), &inventoryConfig)
+ if err != nil {
+ return err
+ }
+
+ if inventoryConfig.Id == "" {
+ return fmt.Errorf("inventory id is empty in xml")
+ }
+
+ params := map[string]interface{}{}
+ params["inventoryId"] = inventoryConfig.Id
+ params["inventory"] = nil
+
+ buffer := new(bytes.Buffer)
+ buffer.Write([]byte(xmlBody))
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketInventory API operation for Object Storage Service
+//
+// Get the bucket inventory.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// InventoryConfiguration the inventory configuration.
+//
+// error it's nil if no error, otherwise it's an error.
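+//
+// A minimal usage sketch added for illustration (bucket name and inventory id
+// are placeholders):
+//
+//   conf, err := client.GetBucketInventory("my-bucket", "report-001")
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = conf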
+//
+func (client Client) GetBucketInventory(bucketName string, strInventoryId string, options ...Option) (InventoryConfiguration, error) {
+ var out InventoryConfiguration
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketInventoryXml API operation for Object Storage Service
+//
+// Get the bucket inventory.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// string the inventory configuration in XML format.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) GetBucketInventoryXml(bucketName string, strInventoryId string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// ListBucketInventory API operation for Object Storage Service
+//
+// List the bucket inventory configurations.
+//
+// bucketName the bucket name.
+//
+// continuationToken the user's continuation token.
+//
+// ListInventoryConfigurationsResult the list of all inventory configurations.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) ListBucketInventory(bucketName, continuationToken string, options ...Option) (ListInventoryConfigurationsResult, error) {
+ var out ListInventoryConfigurationsResult
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ if continuationToken == "" {
+ params["continuation-token"] = nil
+ } else {
+ params["continuation-token"] = continuationToken
+ }
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// ListBucketInventoryXml API operation for Object Storage Service
+//
+// List the bucket inventory configurations.
+//
+// bucketName the bucket name.
+//
+// continuationToken the user's continuation token.
+//
+// string the list of all inventory configurations in XML format.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) ListBucketInventoryXml(bucketName, continuationToken string, options ...Option) (string, error) {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ if continuationToken == "" {
+ params["continuation-token"] = nil
+ } else {
+ params["continuation-token"] = continuationToken
+ }
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ out := string(body)
+ return out, err
+}
+
+// DeleteBucketInventory API operation for Object Storage Service.
+//
+// Delete the bucket inventory configuration.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// error it's nil if no error, otherwise it's an error.
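+//
+// A minimal usage sketch added for illustration (bucket name and inventory id
+// are placeholders):
+//
+//   err := client.DeleteBucketInventory("my-bucket", "report-001")
+//   if err != nil {
+//       // handle error
+//   }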
+//
+func (client Client) DeleteBucketInventory(bucketName, strInventoryId string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["inventory"] = nil
+ params["inventoryId"] = strInventoryId
+
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketAsyncTask API operation for setting an async fetch task.
+//
+// bucketName the bucket name.
+//
+// asynConf the async fetch task configuration.
+//
+// AsyncFetchTaskResult the result of the set operation.
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) SetBucketAsyncTask(bucketName string, asynConf AsyncFetchTaskConfiguration, options ...Option) (AsyncFetchTaskResult, error) {
+ var out AsyncFetchTaskResult
+ params := map[string]interface{}{}
+ params["asyncFetch"] = nil
+
+ var bs []byte
+ bs, err := xml.Marshal(asynConf)
+
+ if err != nil {
+ return out, err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+
+ if err != nil {
+ return out, err
+ }
+
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// GetBucketAsyncTask API operation for getting an async fetch task.
+//
+// bucketName the bucket name.
+//
+// taskID the task ID returned by SetBucketAsyncTask.
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) GetBucketAsyncTask(bucketName string, taskID string, options ...Option) (AsynFetchTaskInfo, error) {
+ var out AsynFetchTaskInfo
+ params := map[string]interface{}{}
+ params["asyncFetch"] = nil
+
+ headers := make(map[string]string)
+ headers[HTTPHeaderOssTaskID] = taskID
+ resp, err := client.do("GET", bucketName, params, headers, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// InitiateBucketWorm creates the bucket worm configuration.
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) InitiateBucketWorm(bucketName string, retentionDays int, options ...Option) (string, error) {
+ var initiateWormConf InitiateWormConfiguration
+ initiateWormConf.RetentionPeriodInDays = retentionDays
+
+ var respHeader http.Header
+ isOptSet, _, _ := IsOptionSet(options, responseHeader)
+ if !isOptSet {
+ options = append(options, GetResponseHeader(&respHeader))
+ }
+
+ bs, err := xml.Marshal(initiateWormConf)
+ if err != nil {
+ return "", err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["worm"] = nil
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ respOpt, _ := FindOption(options, responseHeader, nil)
+ wormID := ""
+ err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+ if err == nil && respOpt != nil {
+ wormID = (respOpt.(*http.Header)).Get("x-oss-worm-id")
+ }
+ return wormID, err
+}
+
+// AbortBucketWorm deletes the bucket worm configuration.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) AbortBucketWorm(bucketName string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["worm"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// CompleteBucketWorm completes the bucket worm configuration.
+// bucketName the bucket name.
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) CompleteBucketWorm(bucketName string, wormID string, options ...Option) error {
+ params := map[string]interface{}{}
+ params["wormId"] = wormID
+ resp, err := client.do("POST", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ExtendBucketWorm extends the bucket worm configuration.
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) ExtendBucketWorm(bucketName string, retentionDays int, wormID string, options ...Option) error {
+ var extendWormConf ExtendWormConfiguration
+ extendWormConf.RetentionPeriodInDays = retentionDays
+
+ bs, err := xml.Marshal(extendWormConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["wormId"] = wormID
+ params["wormExtend"] = nil
+
+ resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketWorm gets the bucket worm configuration.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWorm(bucketName string, options ...Option) (WormConfiguration, error) {
+ var out WormConfiguration
+ params := map[string]interface{}{}
+ params["worm"] = nil
+
+ resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+// SetBucketTransferAcc sets the bucket transfer acceleration configuration.
+// bucketName the bucket name.
+// accConf the bucket transfer acceleration configuration
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketTransferAcc(bucketName string, accConf TransferAccConfiguration, options ...Option) error {
+ bs, err := xml.Marshal(accConf)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["transferAcceleration"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketTransferAcc gets the bucket transfer acceleration configuration.
+// bucketName the bucket name.
+// accConf bucket transfer acceleration configuration +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketTransferAcc(bucketName string, options ...Option) (TransferAccConfiguration, error) { + var out TransferAccConfiguration + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketTransferAcc delete bucket transfer acceleration configuration +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketTransferAcc(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// PutBucketReplication put bucket replication configuration +// bucketName the bucket name. +// xmlBody the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) PutBucketReplication(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "add" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReplication get bucket replication configuration +// bucketName the bucket name. +// string the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReplication(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replication"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// DeleteBucketReplication delete bucket replication configuration +// bucketName the bucket name. +// ruleId the ID of the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketReplication(bucketName string, ruleId string, options ...Option) error { + replicationxml := ReplicationXML{} + replicationxml.ID = ruleId + + bs, err := xml.Marshal(replicationxml) + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "delete" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) 
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketReplicationLocation gets the target locations (regions) to which the bucket's data can be replicated
+// bucketName the bucket name.
+// string the list of target locations, in XML format.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReplicationLocation(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["replicationLocation"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(data), err
+}
+
+// GetBucketReplicationProgress gets the replication progress of the bucket
+// bucketName the bucket name.
+// ruleId the ID of the replication configuration.
+// string the replication progress of the bucket, in XML format.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReplicationProgress(bucketName string, ruleId string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["replicationProgress"] = nil
+	if ruleId != "" {
+		params["rule-id"] = ruleId
+	}
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(data), err
+}
+
+// GetBucketCname gets the cnames bound to the bucket
+// bucketName the bucket name.
+// string the bucket's cname configuration, in XML format.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketCname(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["cname"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(data), err
+}
+
+// CreateBucketCnameToken creates a token for the cname.
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) CreateBucketCnameToken(bucketName string, cname string, options ...Option) (CreateBucketCnameTokenResult, error) {
+	var out CreateBucketCnameTokenResult
+	params := map[string]interface{}{}
+	params["cname"] = nil
+	params["comp"] = "token"
+
+	rxml := CnameConfigurationXML{}
+	rxml.Domain = cname
+
+	bs, err := xml.Marshal(rxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetBucketCnameToken gets the token for the cname
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
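+//
+// A hypothetical end-to-end sketch; the endpoint, keys, bucket and domain are
+// placeholders, and the Token field names are assumed from type.go (outside
+// this hunk):
+//   client, _ := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "your-ak", "your-sk")
+//   // Step 1: create a token and publish it in DNS to prove domain ownership.
+//   created, _ := client.CreateBucketCnameToken("my-bucket", "www.example.com")
+//   // Step 2: read the token back later to verify it.
+//   token, _ := client.GetBucketCnameToken("my-bucket", "www.example.com")
+//   fmt.Println(created.Token, token.Token)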
+func (client Client) GetBucketCnameToken(bucketName string, cname string, options ...Option) (GetBucketCnameTokenResult, error) {
+	var out GetBucketCnameTokenResult
+	params := map[string]interface{}{}
+	params["cname"] = cname
+	params["comp"] = "token"
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// PutBucketCnameXml maps a custom domain name to a bucket
+// bucketName the bucket name.
+// xmlBody the cname configuration in xml format
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketCnameXml(bucketName string, xmlBody string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["cname"] = nil
+	params["comp"] = "add"
+
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(xmlBody))
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// PutBucketCname maps a custom domain name to a bucket
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) PutBucketCname(bucketName string, cname string, options ...Option) error {
+	rxml := CnameConfigurationXML{}
+	rxml.Domain = cname
+
+	bs, err := xml.Marshal(rxml)
+	if err != nil {
+		return err
+	}
+	return client.PutBucketCnameXml(bucketName, string(bs), options...)
+}
+
+// DeleteBucketCname removes the mapping of the custom domain name from a bucket.
+// bucketName the bucket name.
+// cname a custom domain name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketCname(bucketName string, cname string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["cname"] = nil
+	params["comp"] = "delete"
+
+	rxml := CnameConfigurationXML{}
+	rxml.Domain = cname
+
+	bs, err := xml.Marshal(rxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// LimitUploadSpeed sets the upload bandwidth limit. The default is 0 (unlimited).
+// upSpeed the limit in KB/s; 0 means unlimited (the default)
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) LimitUploadSpeed(upSpeed int) error {
+	if client.Config == nil {
+		return fmt.Errorf("client config is nil")
+	}
+	return client.Config.LimitUploadSpeed(upSpeed)
+}
+
+// LimitDownloadSpeed sets the download bandwidth limit. The default is 0 (unlimited).
+// downSpeed the limit in KB/s; 0 means unlimited (the default)
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) LimitDownloadSpeed(downSpeed int) error {
+	if client.Config == nil {
+		return fmt.Errorf("client config is nil")
+	}
+	return client.Config.LimitDownloadSpeed(downSpeed)
+}
+
+// UseCname sets the flag of using CName. By default it's false.
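+//
+// A minimal sketch, assuming "img.example.com" is already CNAME-mapped to the
+// bucket's endpoint (endpoint and keys below are placeholders):
+//   client, err := oss.New("http://img.example.com", "your-ak", "your-sk", oss.UseCname(true))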
+// +// isUseCname true: the endpoint has the CName, false: the endpoint does not have cname. Default is false. +// +func UseCname(isUseCname bool) ClientOption { + return func(client *Client) { + client.Config.IsCname = isUseCname + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// Timeout sets the HTTP timeout in seconds. +// +// connectTimeoutSec HTTP timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended) +// readWriteTimeout HTTP read or write's timeout in seconds. Default is 20 seconds. 0 means infinite. +// +func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption { + return func(client *Client) { + client.Config.HTTPTimeout.ConnectTimeout = + time.Second * time.Duration(connectTimeoutSec) + client.Config.HTTPTimeout.ReadWriteTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.HeaderTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.IdleConnTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.LongTimeout = + time.Second * time.Duration(readWriteTimeout*10) + } +} + +// MaxConns sets the HTTP max connections for a client. +// +// maxIdleConns controls the maximum number of idle (keep-alive) connections across all hosts. Default is 100. +// maxIdleConnsPerHost controls the maximum idle (keep-alive) connections to keep per-host. Default is 100. +// maxConnsPerHost limits the total number of connections per host. Default is no limit. +// +func MaxConns(maxIdleConns, maxIdleConnsPerHost, maxConnsPerHost int) ClientOption { + return func(client *Client) { + client.Config.HTTPMaxConns.MaxIdleConns = maxIdleConns + client.Config.HTTPMaxConns.MaxIdleConnsPerHost = maxIdleConnsPerHost + client.Config.HTTPMaxConns.MaxConnsPerHost = maxConnsPerHost + } +} + +// SecurityToken sets the temporary user's SecurityToken. +// +// token STS token +// +func SecurityToken(token string) ClientOption { + return func(client *Client) { + client.Config.SecurityToken = strings.TrimSpace(token) + } +} + +// EnableMD5 enables MD5 validation. +// +// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation. +// +func EnableMD5(isEnableMD5 bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableMD5 = isEnableMD5 + } +} + +// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB. +// +// threshold the memory threshold in bytes. When the uploaded content is more than 16MB, the temp file is used for computing the MD5. +// +func MD5ThresholdCalcInMemory(threshold int64) ClientOption { + return func(client *Client) { + client.Config.MD5Threshold = threshold + } +} + +// EnableCRC enables the CRC checksum. Default is true. +// +// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum. +// +func EnableCRC(isEnableCRC bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableCRC = isEnableCRC + } +} + +// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2). +// +// userAgent the user agent string. +// +func UserAgent(userAgent string) ClientOption { + return func(client *Client) { + client.Config.UserAgent = userAgent + client.Config.UserSetUa = true + } +} + +// Proxy sets the proxy (optional). The default is not using proxy. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . 
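+//
+// A minimal sketch (the proxy address below is a placeholder):
+//   client, err := oss.New(endpoint, ak, sk, oss.Proxy("proxy.example.com:8080"))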
+//
+func Proxy(proxyHost string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+// AuthProxy sets the proxy information with user name and password.
+//
+// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 .
+// proxyUser the proxy user name.
+// proxyPassword the proxy password.
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Config.IsAuthProxy = true
+		client.Config.ProxyUser = proxyUser
+		client.Config.ProxyPassword = proxyPassword
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+//
+// HTTPClient sets the http.Client in use to the one passed in
+//
+func HTTPClient(HTTPClient *http.Client) ClientOption {
+	return func(client *Client) {
+		client.HTTPClient = HTTPClient
+	}
+}
+
+//
+// SetLogLevel sets the oss sdk log level
+//
+func SetLogLevel(LogLevel int) ClientOption {
+	return func(client *Client) {
+		client.Config.LogLevel = LogLevel
+	}
+}
+
+//
+// SetLogger sets the oss sdk logger
+//
+func SetLogger(Logger *log.Logger) ClientOption {
+	return func(client *Client) {
+		client.Config.Logger = Logger
+	}
+}
+
+// SetCredentialsProvider sets the provider used to get the user's access credentials
+func SetCredentialsProvider(provider CredentialsProvider) ClientOption {
+	return func(client *Client) {
+		client.Config.CredentialsProvider = provider
+	}
+}
+
+// SetLocalAddr sets the local address used by the client
+func SetLocalAddr(localAddr net.Addr) ClientOption {
+	return func(client *Client) {
+		client.Config.LocalAddr = localAddr
+	}
+}
+
+// AuthVersion sets the auth version (v1 or v2 signature) required by the OSS server
+func AuthVersion(authVersion AuthVersionType) ClientOption {
+	return func(client *Client) {
+		client.Config.AuthVersion = authVersion
+	}
+}
+
+// AdditionalHeaders sets the additional HTTP headers that need to be signed
+func AdditionalHeaders(headers []string) ClientOption {
+	return func(client *Client) {
+		client.Config.AdditionalHeaders = headers
+	}
+}
+
+// RedirectEnabled sets whether HTTP redirects are enabled; only effective from go1.7 onward
+func RedirectEnabled(enabled bool) ClientOption {
+	return func(client *Client) {
+		client.Config.RedirectEnabled = enabled
+	}
+}
+
+// InsecureSkipVerify sets whether to skip verifying the server's TLS certificate
+func InsecureSkipVerify(enabled bool) ClientOption {
+	return func(client *Client) {
+		client.Config.InsecureSkipVerify = enabled
+	}
+}
+
+// Region sets the region
+func Region(region string) ClientOption {
+	return func(client *Client) {
+		client.Config.Region = region
+	}
+}
+
+// CloudBoxId sets the cloud box id
+func CloudBoxId(cloudBoxId string) ClientOption {
+	return func(client *Client) {
+		client.Config.CloudBoxId = cloudBoxId
+	}
+}
+
+// Product sets the product type
+func Product(product string) ClientOption {
+	return func(client *Client) {
+		client.Config.Product = product
+	}
+}
+
+// Private
+func (client Client) do(method, bucketName string, params map[string]interface{},
+	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+	err := CheckBucketName(bucketName)
+	if len(bucketName) > 0 && err != nil {
+		return nil, err
+	}
+
+	// option headers
+	addHeaders := make(map[string]string)
+	err = handleOptions(addHeaders, options)
+	if err != nil {
+		return nil, err
+	}
+
+	// merge header
+	if headers == nil {
+		headers = make(map[string]string)
+	}
+
+	for k, v := range addHeaders {
+		if _, ok := headers[k]; !ok {
+			headers[k] = v
+		}
+	}
+
+	resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil)
+
+	// get response header
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil {
+		pRespHeader := respHeader.(*http.Header)
+		if resp != nil {
+			*pRespHeader = resp.Headers
+		}
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
new file mode 100644
index 0000000..94baef7
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -0,0 +1,229 @@
+package oss
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"time"
+)
+
+// Define the level of the output log
+const (
+	LogOff = iota
+	Error
+	Warn
+	Info
+	Debug
+)
+
+// LogTag Tag for each level of log
+var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
+
+// HTTPTimeout defines HTTP timeout.
+type HTTPTimeout struct {
+	ConnectTimeout   time.Duration
+	ReadWriteTimeout time.Duration
+	HeaderTimeout    time.Duration
+	LongTimeout      time.Duration
+	IdleConnTimeout  time.Duration
+}
+
+// HTTPMaxConns defines max idle connections and max idle connections per host
+type HTTPMaxConns struct {
+	MaxIdleConns        int
+	MaxIdleConnsPerHost int
+	MaxConnsPerHost     int
+}
+
+// Credentials is the interface for getting AccessKeyID, AccessKeySecret and SecurityToken
+type Credentials interface {
+	GetAccessKeyID() string
+	GetAccessKeySecret() string
+	GetSecurityToken() string
+}
+
+// CredentialsProvider is the interface for getting Credentials
+type CredentialsProvider interface {
+	GetCredentials() Credentials
+}
+
+type defaultCredentials struct {
+	config *Config
+}
+
+func (defCre *defaultCredentials) GetAccessKeyID() string {
+	return defCre.config.AccessKeyID
+}
+
+func (defCre *defaultCredentials) GetAccessKeySecret() string {
+	return defCre.config.AccessKeySecret
+}
+
+func (defCre *defaultCredentials) GetSecurityToken() string {
+	return defCre.config.SecurityToken
+}
+
+type defaultCredentialsProvider struct {
+	config *Config
+}
+
+func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
+	return &defaultCredentials{config: defBuild.config}
+}
+
+// Config defines oss configuration
+type Config struct {
+	Endpoint        string       // OSS endpoint
+	AccessKeyID     string       // AccessId
+	AccessKeySecret string       // AccessKey
+	RetryTimes      uint         // Retry count, by default it's 5.
+	UserAgent       string       // SDK name/version/system information
+	IsDebug         bool         // Enable debug mode. Default is false.
+	Timeout         uint         // Timeout in seconds. By default it's 60.
+	SecurityToken   string       // STS Token
+	IsCname         bool         // If cname is in the endpoint.
+	HTTPTimeout     HTTPTimeout  // HTTP timeout
+	HTTPMaxConns    HTTPMaxConns // Http max connections
+	IsUseProxy      bool         // Flag of using proxy.
+	ProxyHost       string       // Proxy host address.
+	IsAuthProxy     bool         // Flag of needing authentication.
+	ProxyUser       string       // Proxy user
+	ProxyPassword   string       // Proxy password
+	IsEnableMD5     bool         // Flag of enabling MD5 for upload.
+	MD5Threshold    int64        // Memory footprint threshold for each MD5 computation (16MB is the default), in bytes. When the data is larger than that, a temp file is used.
+	IsEnableCRC     bool         // Flag of enabling CRC for upload.
+	LogLevel            int                 // Log level
+	Logger              *log.Logger         // For writing logs
+	UploadLimitSpeed    int                 // Upload limit speed: KB/s, 0 is unlimited
+	UploadLimiter       *OssLimiter         // Bandwidth limit reader for upload
+	DownloadLimitSpeed  int                 // Download limit speed: KB/s, 0 is unlimited
+	DownloadLimiter     *OssLimiter         // Bandwidth limit reader for download
+	CredentialsProvider CredentialsProvider // User-provided interface to get AccessKeyID, AccessKeySecret, SecurityToken
+	LocalAddr           net.Addr            // Local client host info
+	UserSetUa           bool                // Whether UserAgent is set by the user
+	AuthVersion         AuthVersionType     // v1, v2 or v4 signature, default is v1
+	AdditionalHeaders   []string            // Special HTTP headers that need to be signed
+	RedirectEnabled     bool                // Only effective from go1.7 onward, enable HTTP redirects or not
+	InsecureSkipVerify  bool                // For HTTPS, whether to skip verifying the server certificate
+	Region              string              // Such as cn-hangzhou
+	CloudBoxId          string              //
+	Product             string              // oss or oss-cloudbox, default is oss
+}
+
+// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited, default is 0
+func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
+	if uploadSpeed < 0 {
+		return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
+	} else if uploadSpeed == 0 {
+		config.UploadLimitSpeed = 0
+		config.UploadLimiter = nil
+		return nil
+	}
+
+	var err error
+	config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
+	if err == nil {
+		config.UploadLimitSpeed = uploadSpeed
+	}
+	return err
+}
+
+// LimitDownloadSpeed downloadSpeed:KB/s, 0 is unlimited, default is 0
+func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
+	if downloadSpeed < 0 {
+		return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
+	} else if downloadSpeed == 0 {
+		config.DownloadLimitSpeed = 0
+		config.DownloadLimiter = nil
+		return nil
+	}
+
+	var err error
+	config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
+	if err == nil {
+		config.DownloadLimitSpeed = downloadSpeed
+	}
+	return err
+}
+
+// WriteLog output log function
+func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
+	if config.LogLevel < LogLevel || config.Logger == nil {
+		return
+	}
+
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(LogTag[LogLevel-1])
+	logBuffer.WriteString(fmt.Sprintf(format, a...))
+	config.Logger.Printf("%s", logBuffer.String())
+}
+
+// GetCredentials gets the credentials from the configured provider
+func (config *Config) GetCredentials() Credentials {
+	return config.CredentialsProvider.GetCredentials()
+}
+
+// GetSignProduct gets the product name used for signing
+func (config *Config) GetSignProduct() string {
+	if config.CloudBoxId != "" {
+		return "oss-cloudbox"
+	}
+	return "oss"
+}
+
+// GetSignRegion gets the region used for signing
+func (config *Config) GetSignRegion() string {
+	if config.CloudBoxId != "" {
+		return config.CloudBoxId
+	}
+	return config.Region
+}
+
+// getDefaultOssConfig gets the default configuration.
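+// Callers normally do not build a Config directly; oss.New starts from these
+// defaults and applies ClientOption functions on top. A hedged sketch
+// (endpoint, ak and sk are placeholders):
+//   client, err := oss.New(endpoint, ak, sk,
+//       oss.Timeout(30, 120),       // override the default connect/read-write timeouts
+//       oss.SetLogLevel(oss.Debug)) // default log level is LogOff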
+func getDefaultOssConfig() *Config { + config := Config{} + + config.Endpoint = "" + config.AccessKeyID = "" + config.AccessKeySecret = "" + config.RetryTimes = 5 + config.IsDebug = false + config.UserAgent = userAgent() + config.Timeout = 60 // Seconds + config.SecurityToken = "" + config.IsCname = false + + config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s + config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s + config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s + config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s + config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s + config.HTTPMaxConns.MaxIdleConns = 100 + config.HTTPMaxConns.MaxIdleConnsPerHost = 100 + + config.IsUseProxy = false + config.ProxyHost = "" + config.IsAuthProxy = false + config.ProxyUser = "" + config.ProxyPassword = "" + + config.MD5Threshold = 16 * 1024 * 1024 // 16MB + config.IsEnableMD5 = false + config.IsEnableCRC = true + + config.LogLevel = LogOff + config.Logger = log.New(os.Stdout, "", log.LstdFlags) + + provider := &defaultCredentialsProvider{config: &config} + config.CredentialsProvider = provider + + config.AuthVersion = AuthV1 + config.RedirectEnabled = true + config.InsecureSkipVerify = false + + config.Product = "oss" + + return &config +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go new file mode 100644 index 0000000..4c37aae --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go @@ -0,0 +1,916 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// Conn defines OSS Conn +type Conn struct { + config *Config + url *urlMaker + client *http.Client +} + +var signKeyList = []string{"acl", "uploads", "location", "cors", + "logging", "website", "referer", "lifecycle", + "delete", "append", "tagging", "objectMeta", + "uploadId", "partNumber", "security-token", + "position", "img", "style", "styleName", + "replication", "replicationProgress", + "replicationLocation", "cname", "bucketInfo", + "comp", "qos", "live", "status", "vod", + "startTime", "endTime", "symlink", + "x-oss-process", "response-content-type", "x-oss-traffic-limit", + "response-content-language", "response-expires", + "response-cache-control", "response-content-disposition", + "response-content-encoding", "udf", "udfName", "udfImage", + "udfId", "udfImageDesc", "udfApplication", "comp", + "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo", + "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment", + "x-oss-request-payer", "sequential", + "inventory", "inventoryId", "continuation-token", "asyncFetch", + "worm", "wormId", "wormExtend", "withHashContext", + "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256", + "x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration", + "regionList", "cloudboxes", +} + +// init initializes Conn +func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error { + if client == nil { + // New transport + transport := newTransport(conn, config) + + // Proxy + if conn.config.IsUseProxy { + proxyURL, err := url.Parse(config.ProxyHost) + if err != nil { + return err + } + if config.IsAuthProxy { + if config.ProxyPassword != "" { + proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword) + } else { + 
proxyURL.User = url.User(config.ProxyUser) + } + } + transport.Proxy = http.ProxyURL(proxyURL) + } + client = &http.Client{Transport: transport} + if !config.RedirectEnabled { + disableHTTPRedirect(client) + } + } + + conn.config = config + conn.url = urlMaker + conn.client = client + + return nil +} + +// Do sends request and returns the response +func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + urlParams := conn.getURLParams(params) + subResource := conn.getSubResource(params) + uri := conn.url.getURL(bucketName, objectName, urlParams) + + resource := "" + if conn.config.AuthVersion != AuthV4 { + resource = conn.getResource(bucketName, objectName, subResource) + } else { + resource = conn.getResourceV4(bucketName, objectName, subResource) + } + + return conn.doRequest(method, uri, resource, headers, data, initCRC, listener) +} + +// DoURL sends the request with signed URL and returns the response result. +func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + // Get URI from signedURL + uri, err := url.ParseRequestURI(signedURL) + if err != nil { + return nil, err + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) getURLParams(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if params[k] != nil && params[k].(string) != "" { + buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)) + } + } + + return buf.String() +} + +func (conn Conn) getSubResource(params 
map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + signParams := make(map[string]string) + for k := range params { + if conn.config.AuthVersion == AuthV2 || conn.config.AuthVersion == AuthV4 { + encodedKey := url.QueryEscape(k) + keys = append(keys, encodedKey) + if params[k] != nil && params[k] != "" { + signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1) + } + } else if conn.isParamSign(k) { + keys = append(keys, k) + if params[k] != nil { + signParams[k] = params[k].(string) + } + } + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if _, ok := signParams[k]; ok { + if signParams[k] != "" { + buf.WriteString("=" + signParams[k]) + } + } + } + return buf.String() +} + +func (conn Conn) isParamSign(paramKey string) bool { + for _, k := range signKeyList { + if paramKey == k { + return true + } + } + return false +} + +// getResource gets canonicalized resource +func (conn Conn) getResource(bucketName, objectName, subResource string) string { + if subResource != "" { + subResource = "?" + subResource + } + if bucketName == "" { + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/") + subResource + } + return fmt.Sprintf("/%s%s", bucketName, subResource) + } + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource + } + return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) +} + +// getResource gets canonicalized resource +func (conn Conn) getResourceV4(bucketName, objectName, subResource string) string { + if subResource != "" { + subResource = "?" 
+ subResource + } + + if bucketName == "" { + return fmt.Sprintf("/%s", subResource) + } + + if objectName != "" { + objectName = url.QueryEscape(objectName) + objectName = strings.Replace(objectName, "+", "%20", -1) + objectName = strings.Replace(objectName, "%2F", "/", -1) + return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) + } + return fmt.Sprintf("/%s/%s", bucketName, subResource) +} + +func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + method = strings.ToUpper(method) + req := &http.Request{ + Method: method, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + stNow := time.Now().UTC() + req.Header.Set(HTTPHeaderDate, stNow.Format(http.TimeFormat)) + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if conn.config.AuthVersion == AuthV4 { + req.Header.Set(HttpHeaderOssContentSha256, DefaultContentSha256) + } + + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken()) + } + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + conn.signHeader(req, canonicalizedResource) + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string { + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + Header: make(http.Header), + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10)) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureVersion] 
= "OSS2" + params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID + additionalList, _ := conn.getAdditionalHeaderKeys(req) + if len(additionalList) > 0 { + params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";") + } + } + + subResource := conn.getSubResource(params) + canonicalizedResource := conn.getResource(bucketName, objectName, subResource) + signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + + if conn.config.AuthVersion == AuthV1 { + params[HTTPParamExpires] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + params[HTTPParamSignature] = signedStr + } else if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureV2] = signedStr + } + urlParams := conn.getURLParams(params) + return conn.url.getSignURL(bucketName, objectName, urlParams) +} + +func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string { + params := map[string]interface{}{} + if playlistName != "" { + params[HTTPParamPlaylistName] = playlistName + } + expireStr := strconv.FormatInt(expiration, 10) + params[HTTPParamExpires] = expireStr + + akIf := conn.config.GetCredentials() + if akIf.GetAccessKeyID() != "" { + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params) + params[HTTPParamSignature] = signedStr + } + + urlParams := conn.getURLParams(params) + return conn.url.getSignRtmpURL(bucketName, channelName, urlParams) +} + +// handleBody handles request body +func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64, + listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) { + var file *os.File + var crc hash.Hash64 + reader := body + readerLen, err := GetReaderLen(reader) + if err == nil { + req.ContentLength = readerLen + } + req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) + + // MD5 + if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { + md5 := "" + reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold) + req.Header.Set(HTTPHeaderContentMD5, md5) + } + + // CRC + if reader != nil && conn.config.IsEnableCRC { + crc = NewCRC(CrcTable(), initCRC) + reader = TeeReader(reader, crc, req.ContentLength, listener, tracker) + } + + // HTTP body + rc, ok := reader.(io.ReadCloser) + if !ok && reader != nil { + rc = ioutil.NopCloser(reader) + } + + if conn.isUploadLimitReq(req) { + limitReader := &LimitSpeedReader{ + reader: rc, + ossLimiter: conn.config.UploadLimiter, + } + req.Body = limitReader + } else { + req.Body = rc + } + return file, crc +} + +// isUploadLimitReq: judge limit upload speed or not +func (conn Conn) isUploadLimitReq(req *http.Request) bool { + if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil { + return false + } + + if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" { + if req.ContentLength > 0 { + return true + } + } + return false +} + +func tryGetFileSize(f *os.File) int64 { + fInfo, _ := f.Stat() + return fInfo.Size() +} + +// handleResponse handles response +func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) { + var cliCRC uint64 + var srvCRC uint64 + + 
statusCode := resp.StatusCode
+	if statusCode/100 != 2 {
+		if statusCode >= 400 && statusCode <= 505 {
+			// 4xx and 5xx indicate that an error occurred during the operation
+			var respBody []byte
+			respBody, err := readResponseBody(resp)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(respBody) == 0 {
+				err = ServiceError{
+					StatusCode: statusCode,
+					RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
+				}
+			} else {
+				// Response contains storage service error object, unmarshal
+				srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+					resp.Header.Get(HTTPHeaderOssRequestID))
+				if errIn != nil { // error unmarshaling the error response
+					err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+				} else {
+					err = srvErr
+				}
+			}
+
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+			}, err
+		} else if statusCode >= 300 && statusCode <= 307 {
+			// OSS uses 3xx, but the response has no body
+			err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       resp.Body,
+			}, err
+		} else {
+			// (0,300) [308,400) [506,)
+			// Other extended http StatusCode
+			var respBody []byte
+			respBody, err := readResponseBody(resp)
+			if err != nil {
+				return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
+			}
+
+			if len(respBody) == 0 {
+				err = ServiceError{
+					StatusCode: statusCode,
+					RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
+				}
+			} else {
+				// Response contains storage service error object, unmarshal
+				srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+					resp.Header.Get(HTTPHeaderOssRequestID))
+				if errIn != nil { // error unmarshaling the error response
+					err = fmt.Errorf("unknown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+				} else {
+					err = srvErr
+				}
+			}
+
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+			}, err
+		}
+	} else {
+		if conn.config.IsEnableCRC && crc != nil {
+			cliCRC = crc.Sum64()
+		}
+		srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+
+		realBody := resp.Body
+		if conn.isDownloadLimitResponse(resp) {
+			limitReader := &LimitSpeedReader{
+				reader:     realBody,
+				ossLimiter: conn.config.DownloadLimiter,
+			}
+			realBody = limitReader
+		}
+
+		// 2xx, successful
+		return &Response{
+			StatusCode: resp.StatusCode,
+			Headers:    resp.Header,
+			Body:       realBody,
+			ClientCRC:  cliCRC,
+			ServerCRC:  srvCRC,
+		}, nil
+	}
+}
+
+// isDownloadLimitResponse: judge whether to limit download speed or not
+func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
+	if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
+		return false
+	}
+
+	if strings.EqualFold(resp.Request.Method, "GET") {
+		return true
+	}
+	return false
+}
+
+// LoggerHTTPReq Print the header information of the http request
+func (conn Conn) LoggerHTTPReq(req *http.Request) {
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
+	logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
+	logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path))
+	logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery))
+	
logBuffer.WriteString(fmt.Sprintf("Header info:")) + + for k, v := range req.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +// LoggerHTTPResp Print Response to http request +func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) { + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + for k, v := range resp.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) { + if contentLen == 0 || contentLen > md5Threshold { + // Huge body, use temporary file + tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix) + if tempFile != nil { + io.Copy(tempFile, body) + tempFile.Seek(0, os.SEEK_SET) + md5 := md5.New() + io.Copy(md5, tempFile) + sum := md5.Sum(nil) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + tempFile.Seek(0, os.SEEK_SET) + reader = tempFile + } + } else { + // Small body, use memory + buf, _ := ioutil.ReadAll(body) + sum := md5.Sum(buf) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + reader = bytes.NewReader(buf) + } + return +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) { + var storageErr ServiceError + + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + storageErr.RawMessage = string(body) + return storageErr, nil +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func jsonUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// timeoutConn handles HTTP timeout +type timeoutConn struct { + conn net.Conn + timeout time.Duration + longTimeout time.Duration +} + +func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { + conn.SetReadDeadline(time.Now().Add(longTimeout)) + return &timeoutConn{ + conn: conn, + timeout: timeout, + longTimeout: longTimeout, + } +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + c.SetReadDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Read(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + c.SetWriteDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Write(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Close() error { + return c.conn.Close() +} + +func (c *timeoutConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c 
*timeoutConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *timeoutConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *timeoutConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *timeoutConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +// UrlMaker builds URL and resource +const ( + urlTypeCname = 1 + urlTypeIP = 2 + urlTypeAliyun = 3 +) + +type urlMaker struct { + Scheme string // HTTP or HTTPS + NetLoc string // Host or IP + Type int // 1 CNAME, 2 IP, 3 ALIYUN + IsProxy bool // Proxy +} + +// Init parses endpoint +func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error { + if strings.HasPrefix(endpoint, "http://") { + um.Scheme = "http" + um.NetLoc = endpoint[len("http://"):] + } else if strings.HasPrefix(endpoint, "https://") { + um.Scheme = "https" + um.NetLoc = endpoint[len("https://"):] + } else { + um.Scheme = "http" + um.NetLoc = endpoint + } + + //use url.Parse() to get real host + strUrl := um.Scheme + "://" + um.NetLoc + url, err := url.Parse(strUrl) + if err != nil { + return err + } + + um.NetLoc = url.Host + host, _, err := net.SplitHostPort(um.NetLoc) + if err != nil { + host = um.NetLoc + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + } + + ip := net.ParseIP(host) + if ip != nil { + um.Type = urlTypeIP + } else if isCname { + um.Type = urlTypeCname + } else { + um.Type = urlTypeAliyun + } + um.IsProxy = isProxy + + return nil +} + +// getURL gets URL +func (um urlMaker) getURL(bucket, object, params string) *url.URL { + host, path := um.buildURL(bucket, object) + addr := "" + if params == "" { + addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path) + } else { + addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) + } + uri, _ := url.ParseRequestURI(addr) + return uri +} + +// getSignURL gets sign URL +func (um urlMaker) getSignURL(bucket, object, params string) string { + host, path := um.buildURL(bucket, object) + return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) +} + +// getSignRtmpURL Build Sign Rtmp URL +func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string { + host, path := um.buildURL(bucket, "live") + + channelName = url.QueryEscape(channelName) + channelName = strings.Replace(channelName, "+", "%20", -1) + + return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params) +} + +// buildURL builds URL +func (um urlMaker) buildURL(bucket, object string) (string, string) { + var host = "" + var path = "" + + object = url.QueryEscape(object) + object = strings.Replace(object, "+", "%20", -1) + + if um.Type == urlTypeCname { + host = um.NetLoc + path = "/" + object + } else if um.Type == urlTypeIP { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } else { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = bucket + "." 
+ um.NetLoc + path = "/" + object + } + } + + return host, path +} + +// buildURL builds URL +func (um urlMaker) buildURLV4(bucket, object string) (string, string) { + var host = "" + var path = "" + + object = url.QueryEscape(object) + object = strings.Replace(object, "+", "%20", -1) + + // no escape / + object = strings.Replace(object, "%2F", "/", -1) + + if um.Type == urlTypeCname { + host = um.NetLoc + path = "/" + object + } else if um.Type == urlTypeIP { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } else { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = bucket + "." + um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } + return host, path +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go new file mode 100644 index 0000000..c79aa3c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go @@ -0,0 +1,264 @@ +package oss + +import "os" + +// ACLType bucket/object ACL +type ACLType string + +const ( + // ACLPrivate definition : private read and write + ACLPrivate ACLType = "private" + + // ACLPublicRead definition : public read and private write + ACLPublicRead ACLType = "public-read" + + // ACLPublicReadWrite definition : public read and public write + ACLPublicReadWrite ACLType = "public-read-write" + + // ACLDefault Object. It's only applicable for object. + ACLDefault ACLType = "default" +) + +// bucket versioning status +type VersioningStatus string + +const ( + // Versioning Status definition: Enabled + VersionEnabled VersioningStatus = "Enabled" + + // Versioning Status definition: Suspended + VersionSuspended VersioningStatus = "Suspended" +) + +// MetadataDirectiveType specifying whether use the metadata of source object when copying object. +type MetadataDirectiveType string + +const ( + // MetaCopy the target object's metadata is copied from the source one + MetaCopy MetadataDirectiveType = "COPY" + + // MetaReplace the target object's metadata is created as part of the copy request (not same as the source one) + MetaReplace MetadataDirectiveType = "REPLACE" +) + +// TaggingDirectiveType specifying whether use the tagging of source object when copying object. 
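+//
+// A hypothetical sketch; TaggingDirective is assumed to be an Option defined in
+// option.go, which is not part of this hunk:
+//   _, err := bucket.CopyObject("src-key", "dst-key", oss.TaggingDirective(oss.TaggingCopy))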
+type TaggingDirectiveType string
+
+const (
+	// TaggingCopy the target object's tagging is copied from the source one
+	TaggingCopy TaggingDirectiveType = "COPY"
+
+	// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
+	TaggingReplace TaggingDirectiveType = "REPLACE"
+)
+
+// AlgorithmType specifying the server side encryption algorithm name
+type AlgorithmType string
+
+const (
+	KMSAlgorithm AlgorithmType = "KMS"
+	AESAlgorithm AlgorithmType = "AES256"
+	SM4Algorithm AlgorithmType = "SM4"
+)
+
+// StorageClassType bucket storage type
+type StorageClassType string
+
+const (
+	// StorageStandard standard
+	StorageStandard StorageClassType = "Standard"
+
+	// StorageIA infrequent access
+	StorageIA StorageClassType = "IA"
+
+	// StorageArchive archive
+	StorageArchive StorageClassType = "Archive"
+
+	// StorageColdArchive cold archive
+	StorageColdArchive StorageClassType = "ColdArchive"
+)
+
+// DataRedundancyType bucket data redundancy type
+type DataRedundancyType string
+
+const (
+	// RedundancyLRS locally redundant storage, the default value
+	RedundancyLRS DataRedundancyType = "LRS"
+
+	// RedundancyZRS zone-redundant storage (same-city redundancy)
+	RedundancyZRS DataRedundancyType = "ZRS"
+)
+
+// ObjecthashFuncType the hash function used to compute the object hash
+type ObjecthashFuncType string
+
+const (
+	HashFuncSha1   ObjecthashFuncType = "SHA-1"
+	HashFuncSha256 ObjecthashFuncType = "SHA-256"
+)
+
+// PayerType the type of request payer
+type PayerType string
+
+const (
+	// Requester the requester who sends the request pays the cost
+	Requester PayerType = "Requester"
+
+	// BucketOwner the bucket owner pays the cost
+	BucketOwner PayerType = "BucketOwner"
+)
+
+// RestoreMode the restore mode for cold archive objects
+type RestoreMode string
+
+const (
+	// RestoreExpedited object will be restored in 1 hour
+	RestoreExpedited RestoreMode = "Expedited"
+
+	// RestoreStandard object will be restored in 2-5 hours
+	RestoreStandard RestoreMode = "Standard"
+
+	// RestoreBulk object will be restored in 5-10 hours
+	RestoreBulk RestoreMode = "Bulk"
+)
+
+// HTTPMethod HTTP request method
+type HTTPMethod string
+
+const (
+	// HTTPGet HTTP GET
+	HTTPGet HTTPMethod = "GET"
+
+	// HTTPPut HTTP PUT
+	HTTPPut HTTPMethod = "PUT"
+
+	// HTTPHead HTTP HEAD
+	HTTPHead HTTPMethod = "HEAD"
+
+	// HTTPPost HTTP POST
+	HTTPPost HTTPMethod = "POST"
+
+	// HTTPDelete HTTP DELETE
+	HTTPDelete HTTPMethod = "DELETE"
+)
+
+// HTTP headers
+const (
+	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
+	HTTPHeaderAuthorization             = "Authorization"
+	HTTPHeaderCacheControl              = "Cache-Control"
+	HTTPHeaderContentDisposition        = "Content-Disposition"
+	HTTPHeaderContentEncoding           = "Content-Encoding"
+	HTTPHeaderContentLength             = "Content-Length"
+	HTTPHeaderContentMD5                = "Content-MD5"
+	HTTPHeaderContentType               = "Content-Type"
+	HTTPHeaderContentLanguage           = "Content-Language"
+	HTTPHeaderDate                      = "Date"
+	HTTPHeaderEtag                      = "ETag"
+	HTTPHeaderExpires                   = "Expires"
+	HTTPHeaderHost                      = "Host"
+	HTTPHeaderLastModified              = "Last-Modified"
+	HTTPHeaderRange                     = "Range"
+	HTTPHeaderLocation                  = "Location"
+	HTTPHeaderOrigin                    = "Origin"
+	HTTPHeaderServer                    = "Server"
+	HTTPHeaderUserAgent                 = "User-Agent"
+	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
+	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
+	HTTPHeaderIfMatch                   = "If-Match"
+	HTTPHeaderIfNoneMatch               = "If-None-Match"
+	HTTPHeaderACReqMethod               = "Access-Control-Request-Method"
+	HTTPHeaderACReqHeaders              = "Access-Control-Request-Headers"
+
+	HTTPHeaderOssACL        = "X-Oss-Acl"
+	HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
+	
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl" + HTTPHeaderOssSecurityToken = "X-Oss-Security-Token" + HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption" + HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm" + HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key" + HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5" + HTTPHeaderOssCopySource = "X-Oss-Copy-Source" + HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HTTPHeaderOssRequestID = "X-Oss-Request-Id" + HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HTTPHeaderOssStorageClass = "X-Oss-Storage-Class" + HTTPHeaderOssCallback = "X-Oss-Callback" + HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var" + HTTPHeaderOssRequester = "X-Oss-Request-Payer" + HTTPHeaderOssTagging = "X-Oss-Tagging" + HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit" + HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite" + HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior" + HTTPHeaderOssTaskID = "X-Oss-Task-Id" + HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx" + HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx" + HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap" + HttpHeaderOssDate = "X-Oss-Date" + HttpHeaderOssContentSha256 = "X-Oss-Content-Sha256" +) + +// HTTP Param +const ( + HTTPParamExpires = "Expires" + HTTPParamAccessKeyID = "OSSAccessKeyId" + HTTPParamSignature = "Signature" + HTTPParamSecurityToken = "security-token" + HTTPParamPlaylistName = "playlistName" + + HTTPParamSignatureVersion = "x-oss-signature-version" + HTTPParamExpiresV2 = "x-oss-expires" + HTTPParamAccessKeyIDV2 = "x-oss-access-key-id" + HTTPParamSignatureV2 = "x-oss-signature" + HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers" +) + +// Other constants +const ( + MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB + MinPartSize = 100 * 1024 // Min part size, 100KB + + FilePermMode = os.FileMode(0664) // Default file permission + + TempFilePrefix = "oss-go-temp-" // Temp file prefix + TempFileSuffix = ".temp" // Temp file suffix + + CheckpointFileSuffix = ".cp" // Checkpoint file suffix + + NullVersion = "null" + + DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature + + Version = "v2.2.4" // Go SDK version +) + +// FrameType +const ( + DataFrameType = 8388609 + ContinuousFrameType = 8388612 + EndFrameType = 8388613 + MetaEndFrameCSVType = 8388614 + MetaEndFrameJSONType = 8388615 +) + +// AuthVersion the version of auth +type AuthVersionType string + +const ( + // AuthV1 v1 + AuthV1 AuthVersionType = "v1" + // AuthV2 v2 + AuthV2 AuthVersionType = "v2" + // AuthV4 v4 + AuthV4 AuthVersionType = "v4" +) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go new file mode 100644 index 0000000..c96694f --- /dev/null +++ 
b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go @@ -0,0 +1,123 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint64 + tab *crc64.Table +} + +// NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } + +// Size returns the number of bytes sum will return. +func (d *digest) Size() int { return crc64.Size } + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) BlockSize() int { return 1 } + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { d.crc = 0 } + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *digest) Sum64() uint64 { return d.crc } + +// Sum returns hash value. +func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 0000000..90c1b63 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,567 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + 
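+
+	// Illustrative check (not part of the SDK source) for CRC64Combine defined
+	// in crc.go above: combining the CRCs of the two halves of a buffer must
+	// reproduce the CRC of the whole buffer.
+	//
+	//	tab := crc64.MakeTable(crc64.ECMA)
+	//	whole := []byte("hello world")
+	//	a, b := whole[:5], whole[5:]
+	//	combined := oss.CRC64Combine(crc64.Checksum(a, tab), crc64.Checksum(b, tab), uint64(len(b)))
+	//	fmt.Println(combined == crc64.Checksum(whole, tab)) // true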
"strconv" + "time" +) + +// DownloadFile downloads files with multipart download. +// +// objectKey the object key. +// filePath the local file to download from objectKey in OSS. +// partSize the part size in bytes. +// options object's constraints, check out GetObject for the reference. +// +// error it's nil when the call succeeds, otherwise it's an error object. +// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + uRange, err := GetRangeConfig(options) + if err != nil { + return err + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath) + if cpFilePath != "" { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange) + } + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + absPath, _ := filepath.Abs(destFile) + cpFileName := getCpFileName(src, absPath, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// downloadWorkerArg is download worker's parameters +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// downloadPartHook is hook for test +type downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. +type defaultDownloadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// downloadWorker +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // Resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + + var respHeader http.Header + opts := make([]Option, len(arg.options)+3) + // Append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p, GetResponseHeader(&respHeader)) + + rd, err := arg.bucket.GetObject(arg.key, opts...) 
+ if err != nil { + failed <- err + break + } + defer rd.Close() + + var crcCalc hash.Hash64 + if arg.enableCRC { + crcCalc = crc64.New(CrcTable()) + contentLen := part.End - part.Start + 1 + rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil)) + } + defer rd.Close() + + select { + case <-die: + return + default: + } + + fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode) + if err != nil { + failed <- err + break + } + + _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET) + if err != nil { + fd.Close() + failed <- err + break + } + + startT := time.Now().UnixNano() / 1000 / 1000 / 1000 + _, err = io.Copy(fd, rd) + endT := time.Now().UnixNano() / 1000 / 1000 / 1000 + if err != nil { + arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error()) + fd.Close() + failed <- err + break + } + + if arg.enableCRC { + part.CRC64 = crcCalc.Sum64() + } + + fd.Close() + results <- part + } +} + +// downloadScheduler +func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// downloadPart defines download part +type downloadPart struct { + Index int // Part number, starting from 0 + Start int64 // Start index + End int64 // End index + Offset int64 // Offset + CRC64 uint64 // CRC check value of part +} + +// getDownloadParts gets download parts +func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart { + parts := []downloadPart{} + part := downloadPart{} + i := 0 + start, end := AdjustRange(uRange, objectSize) + for offset := start; offset < end; offset += partSize { + part.Index = i + part.Start = offset + part.End = GetPartEnd(offset, end, partSize) + part.Offset = start + part.CRC64 = 0 + parts = append(parts, part) + i++ + } + return parts +} + +// getObjectBytes gets object bytes length +func getObjectBytes(parts []downloadPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// combineCRCInParts caculates the total CRC of continuous parts +func combineCRCInParts(dps []downloadPart) uint64 { + if dps == nil || len(dps) == 0 { + return 0 + } + + crc := dps[0].CRC64 + for i := 1; i < len(dps); i++ { + crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1)) + } + + return crc +} + +// downloadFile downloads file concurrently without checkpoint. +func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // If the file does not exist, create one. If exists, the download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) 
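+	// Worked example for getDownloadParts above (illustrative): with
+	// objectSize=10, partSize=4 and no range option it returns
+	//	{Index:0 Start:0 End:3} {Index:1 Start:4 End:7} {Index:2 Start:8 End:9}
+	// and getObjectBytes sums these to 10 bytes.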
+ if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return err + } + + enableCRC := false + expectedCRC := (uint64)(0) + if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" { + if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) { + enableCRC = true + expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64) + } + } + + // Get the parts of the file + parts := getDownloadParts(objectSize, partSize, uRange) + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start the download workers + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Download parts concurrently + go downloadScheduler(jobs, parts) + + // Waiting for parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + downBytes := (part.End - part.Start + 1) + completedBytes += downBytes + parts[part.Index].CRC64 = part.CRC64 + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + if enableCRC { + actualCRC := combineCRCInParts(parts) + err = CheckDownloadCRC(actualCRC, expectedCRC) + if err != nil { + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// ----- Concurrent download with chcekpoint ----- + +const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3" + +type downloadCheckpoint struct { + Magic string // Magic + MD5 string // Checkpoint content MD5 + FilePath string // Local file + Object string // Key + ObjStat objectStat // Object status + Parts []downloadPart // All download parts + PartStat []bool // Parts' download status + Start int64 // Start point of the file + End int64 // End point of the file + enableCRC bool // Whether has CRC check + CRC uint64 // CRC check value +} + +type objectStat struct { + Size int64 // Object size + LastModified string // Last modified time + Etag string // Etag +} + +// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated. 
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) { + // Compare the CP's Magic and the MD5 + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != downloadCpMagic || b64 != cp.MD5 { + return false, nil + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return false, err + } + + // Compare the object size, last modified time and etag + if cp.ObjStat.Size != objectSize || + cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || + cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { + return false, nil + } + + // Check the download range + if uRange != nil { + start, end := AdjustRange(uRange, objectSize) + if start != cp.Start || end != cp.End { + return false, nil + } + } + + return true, nil +} + +// load checkpoint from local file +func (cp *downloadCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump funciton dumps to file +func (cp *downloadCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialize + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// todoParts gets unfinished parts +func (cp downloadCheckpoint) todoParts() []downloadPart { + dps := []downloadPart{} + for i, ps := range cp.PartStat { + if !ps { + dps = append(dps, cp.Parts[i]) + } + } + return dps +} + +// getCompletedBytes gets completed size +func (cp downloadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for i, part := range cp.Parts { + if cp.PartStat[i] { + completedBytes += (part.End - part.Start + 1) + } + } + return completedBytes +} + +// prepare initiates download tasks +func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error { + // CP + cp.Magic = downloadCpMagic + cp.FilePath = filePath + cp.Object = objectKey + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return err + } + + cp.ObjStat.Size = objectSize + cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) + cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) + + if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" { + if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) { + cp.enableCRC = true + cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64) + } + } + + // Parts + cp.Parts = getDownloadParts(objectSize, partSize, uRange) + cp.PartStat = make([]bool, len(cp.Parts)) + for i := range cp.PartStat { + cp.PartStat[i] = false + } + + return nil +} + +func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error { + err := os.Rename(downFilepath, cp.FilePath) + if err != nil { + return err + } + return os.Remove(cpFilePath) +} + +// downloadFileWithCp downloads files with checkpoint. 
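+// A typical caller reaches this path through DownloadFile with the checkpoint
+// option enabled (illustrative sketch; endpoint, credentials and names are
+// hypothetical):
+//
+//	client, _ := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "ak", "sk")
+//	bucket, _ := client.Bucket("my-bucket")
+//	err := bucket.DownloadFile("big-object", "/tmp/big-object", 3*1024*1024,
+//		oss.Routines(5), oss.Checkpoint(true, "/tmp/big-object.cp"))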
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // Load checkpoint data. + dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) + if err != nil { + return err + } + + // Load error or data invalid. Re-initialize the download. + valid, err := dcp.isValid(meta, uRange) + if err != nil || !valid { + if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Create the file if not exists. Otherwise the parts download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Unfinished parts + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the download workers routine + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Concurrently downloads parts + go downloadScheduler(jobs, parts) + + // Wait for the parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + downBytes := (part.End - part.Start + 1) + completedBytes += downBytes + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = CheckDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 0000000..a877211 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,94 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. 
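+// Callers can type-assert a returned error to inspect it (illustrative):
+//
+//	if serr, ok := err.(oss.ServiceError); ok {
+//		log.Printf("oss: code=%s status=%d request=%s", serr.Code, serr.StatusCode, serr.RequestID)
+//	}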
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
+}
+
+// Error implements interface error
+func (e ServiceError) Error() string {
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
+}
+
+// Error implements interface error
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
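+//
+// Example (illustrative):
+//
+//	err := oss.CheckRespCode(404, []int{200, 204})
+//	// err reads: "oss: status code from service response is 404 Not Found;
+//	// was expecting 200 OK or 204 No Content"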
+func CheckRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} + +// CRCCheckError is returned when crc check is inconsistent between client and server +type CRCCheckError struct { + clientCRC uint64 // Calculated CRC64 in client + serverCRC uint64 // Calculated CRC64 in server + operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc + requestID string // The request id of this operation +} + +// Error implements interface error +func (e CRCCheckError) Error() string { + return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s", + e.operation, e.clientCRC, e.serverCRC, e.requestID) +} + +func CheckDownloadCRC(clientCRC, serverCRC uint64) error { + if clientCRC == serverCRC { + return nil + } + return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""} +} + +func CheckCRC(resp *Response, operation string) error { + if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC { + return nil + } + return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)} +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go new file mode 100644 index 0000000..943dc8f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go @@ -0,0 +1,28 @@ +// +build !go1.7 + +// "golang.org/x/time/rate" is depended on golang context package go1.7 onward +// this file is only for build,not supports limit upload speed +package oss + +import ( + "fmt" + "io" +) + +const ( + perTokenBandwidthSize int = 1024 +) + +type OssLimiter struct { +} + +type LimitSpeedReader struct { + io.ReadCloser + reader io.Reader + ossLimiter *OssLimiter +} + +func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) { + err = fmt.Errorf("rate.Limiter is not supported below version go1.7") + return nil, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go new file mode 100644 index 0000000..f6baf29 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go @@ -0,0 +1,90 @@ +// +build go1.7 + +package oss + +import ( + "fmt" + "io" + "math" + "time" + + "golang.org/x/time/rate" +) + +const ( + perTokenBandwidthSize int = 1024 +) + +// OssLimiter wrapper rate.Limiter +type OssLimiter struct { + limiter *rate.Limiter +} + +// GetOssLimiter create OssLimiter +// uploadSpeed KB/s +func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) { + limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed) + + // first consume the initial full token,the limiter will behave more accurately + limiter.AllowN(time.Now(), uploadSpeed) + + return &OssLimiter{ + limiter: limiter, + }, nil +} + +// LimitSpeedReader for limit bandwidth upload +type LimitSpeedReader struct { + io.ReadCloser + reader io.Reader + ossLimiter *OssLimiter +} + +// Read +func (r *LimitSpeedReader) Read(p []byte) (n int, err error) { + n = 0 + err = nil + start := 0 + burst := r.ossLimiter.limiter.Burst() + var end int + var tmpN int + var tc int + for start < len(p) { + if start+burst*perTokenBandwidthSize < len(p) { + end = start + burst*perTokenBandwidthSize + } else { + end = len(p) + } + + tmpN, err = r.reader.Read(p[start:end]) + if tmpN > 
0 { + n += tmpN + start = n + } + + if err != nil { + return + } + + tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize))) + now := time.Now() + re := r.ossLimiter.limiter.ReserveN(now, tc) + if !re.OK() { + err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d", + start, end, burst, perTokenBandwidthSize) + return + } + timeDelay := re.Delay() + time.Sleep(timeDelay) + } + return +} + +// Close ... +func (r *LimitSpeedReader) Close() error { + rc, ok := r.reader.(io.ReadCloser) + if ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go new file mode 100644 index 0000000..bf5ba07 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go @@ -0,0 +1,257 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "time" +) + +// +// CreateLiveChannel create a live-channel +// +// channelName the name of the channel +// config configuration of the channel +// +// CreateLiveChannelResult the result of create live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) { + var out CreateLiveChannelResult + + bs, err := xml.Marshal(config) + if err != nil { + return out, err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["live"] = nil + resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled +// +// channelName the name of the channel +// status enabled/disabled +// +// error nil if success, otherwise error +// +func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error { + params := map[string]interface{}{} + params["live"] = nil + params["status"] = status + + resp, err := bucket.do("PUT", channelName, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime +// +// channelName the name of the channel +// playlistName the name of the playlist, must end with ".m3u8" +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// error nil if success, otherwise error +// +func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error { + params := map[string]interface{}{} + params["vod"] = nil + params["startTime"] = strconv.FormatInt(startTime.Unix(), 10) + params["endTime"] = strconv.FormatInt(endTime.Unix(), 10) + + key := fmt.Sprintf("%s/%s", channelName, playlistName) + resp, err := bucket.do("POST", key, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime +// +// channelName the name of the channel +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// io.ReadCloser reader instance for reading data from response. 
It must be called close() after the usage and only valid when error is nil. +// error nil if success, otherwise error +// +func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) { + params := map[string]interface{}{} + params["vod"] = nil + params["startTime"] = strconv.FormatInt(startTime.Unix(), 10) + params["endTime"] = strconv.FormatInt(endTime.Unix(), 10) + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +// +// GetLiveChannelStat Get the state of the live-channel +// +// channelName the name of the channel +// +// LiveChannelStat the state of the live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) { + var out LiveChannelStat + params := map[string]interface{}{} + params["live"] = nil + params["comp"] = "stat" + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// GetLiveChannelInfo Get the configuration info of the live-channel +// +// channelName the name of the channel +// +// LiveChannelConfiguration the configuration info of the live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) { + var out LiveChannelConfiguration + params := map[string]interface{}{} + params["live"] = nil + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// GetLiveChannelHistory Get push records of live-channel +// +// channelName the name of the channel +// +// LiveChannelHistory push records +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) { + var out LiveChannelHistory + params := map[string]interface{}{} + params["live"] = nil + params["comp"] = "history" + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// ListLiveChannel list the live-channels +// +// options Prefix: filter by the name start with the value of "Prefix" +// MaxKeys: the maximum count returned +// Marker: cursor from which starting list +// +// ListLiveChannelResult live-channel list +// error nil if success, otherwise error +// +func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) { + var out ListLiveChannelResult + + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + params["live"] = nil + + resp, err := bucket.do("GET", "", params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted. 
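+// Example flow over the live-channel APIs in this file (illustrative; channel,
+// playlist and field values are hypothetical):
+//
+//	cfg := oss.LiveChannelConfiguration{
+//		Description: "demo channel",
+//		Status:      "enabled",
+//		Target:      oss.LiveChannelTarget{Type: "HLS", PlaylistName: "playlist.m3u8"},
+//	}
+//	_, err := bucket.CreateLiveChannel("demo-channel", cfg)
+//	url, err := bucket.SignRtmpURL("demo-channel", "playlist.m3u8", 3600)
+//	err = bucket.PutLiveChannelStatus("demo-channel", "disabled")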
+// +// channelName the name of the channel +// +// error nil if success, otherwise error +// +func (bucket Bucket) DeleteLiveChannel(channelName string) error { + params := map[string]interface{}{} + params["live"] = nil + + if channelName == "" { + return fmt.Errorf("invalid argument: channel name is empty") + } + + resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel. +// +// channelName the name of the channel +// playlistName the name of the playlist, must end with ".m3u8" +// expires expiration (in seconds) +// +// string singed rtmp push stream url +// error nil if success, otherwise error +// +func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) { + if expires <= 0 { + return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires) + } + expiration := time.Now().Unix() + expires + + return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go new file mode 100644 index 0000000..64f4dcc --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go @@ -0,0 +1,572 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": "application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", + 
".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + ".djvu": "image/vnd.djvu", + ".dll": 
"application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", + ".323": "text/h323", + ".aab": "application/x-authoware-bin", + ".aam": "application/x-authoware-map", + ".aas": "application/x-authoware-seg", + ".acx": "application/internet-property-stream", + ".als": "audio/X-Alpha5", + ".amc": "application/x-mpeg", + ".ani": "application/octet-stream", + ".asd": "application/astound", + ".asf": "video/x-ms-asf", + ".asn": "application/astound", + ".asp": "application/x-asap", + ".asr": "video/x-ms-asf", + ".asx": "video/x-ms-asf", + ".avb": "application/octet-stream", + ".awb": "audio/amr-wb", + ".axs": "application/olescript", + ".bas": "text/plain", + ".bin ": "application/octet-stream", + ".bld": "application/bld", + ".bld2": "application/bld2", + ".bpk": "application/octet-stream", + ".c": "text/plain", + ".cal": "image/x-cals", + ".cat": "application/vnd.ms-pkiseccat", + ".ccn": "application/x-cnc", + ".cco": "application/x-cocoa", + ".cer": 
"application/x-x509-ca-cert", + ".cgi": "magnus-internal/cgi", + ".chat": "application/x-chat", + ".clp": "application/x-msclip", + ".cmx": "image/x-cmx", + ".co": "application/x-cult3d-object", + ".cod": "image/cis-cod", + ".conf": "text/plain", + ".cpp": "text/plain", + ".crd": "application/x-mscardfile", + ".crl": "application/pkix-crl", + ".crt": "application/x-x509-ca-cert", + ".csm": "chemical/x-csml", + ".csml": "chemical/x-csml", + ".cur": "application/octet-stream", + ".dcm": "x-lml/x-evm", + ".dcx": "image/x-dcx", + ".der": "application/x-x509-ca-cert", + ".dhtml": "text/html", + ".dot": "application/msword", + ".dwf": "drawing/x-dwf", + ".dwg": "application/x-autocad", + ".dxf": "application/x-autocad", + ".ebk": "application/x-expandedbook", + ".emb": "chemical/x-embl-dl-nucleotide", + ".embl": "chemical/x-embl-dl-nucleotide", + ".epub": "application/epub+zip", + ".eri": "image/x-eri", + ".es": "audio/echospeech", + ".esl": "audio/echospeech", + ".etc": "application/x-earthtime", + ".evm": "x-lml/x-evm", + ".evy": "application/envoy", + ".fh4": "image/x-freehand", + ".fh5": "image/x-freehand", + ".fhc": "image/x-freehand", + ".fif": "application/fractals", + ".flr": "x-world/x-vrml", + ".fm": "application/x-maker", + ".fpx": "image/x-fpx", + ".fvi": "video/isivideo", + ".gau": "chemical/x-gaussian-input", + ".gca": "application/x-gca-compressed", + ".gdb": "x-lml/x-gdb", + ".gps": "application/x-gps", + ".h": "text/plain", + ".hdm": "text/x-hdml", + ".hdml": "text/x-hdml", + ".hlp": "application/winhlp", + ".hta": "application/hta", + ".htc": "text/x-component", + ".hts": "text/html", + ".htt": "text/webviewhtml", + ".ifm": "image/gif", + ".ifs": "image/ifs", + ".iii": "application/x-iphone", + ".imy": "audio/melody", + ".ins": "application/x-internet-signup", + ".ips": "application/x-ipscript", + ".ipx": "application/x-ipix", + ".isp": "application/x-internet-signup", + ".it": "audio/x-mod", + ".itz": "audio/x-mod", + ".ivr": "i-world/i-vrml", + ".j2k": "image/j2k", + ".jam": "application/x-jam", + ".java": "text/plain", + ".jfif": "image/pipeg", + ".jpz": "image/jpeg", + ".jwc": "application/jwc", + ".kjx": "application/x-kjx", + ".lak": "x-lml/x-lak", + ".lcc": "application/fastman", + ".lcl": "application/x-digitalloca", + ".lcr": "application/x-digitalloca", + ".lgh": "application/lgh", + ".lml": "x-lml/x-lml", + ".lmlpack": "x-lml/x-lmlpack", + ".log": "text/plain", + ".lsf": "video/x-la-asf", + ".lsx": "video/x-la-asf", + ".m13": "application/x-msmediaview", + ".m14": "application/x-msmediaview", + ".m15": "audio/x-mod", + ".m3url": "audio/x-mpegurl", + ".m4b": "audio/mp4a-latm", + ".ma1": "audio/ma1", + ".ma2": "audio/ma2", + ".ma3": "audio/ma3", + ".ma5": "audio/ma5", + ".map": "magnus-internal/imagemap", + ".mbd": "application/mbedlet", + ".mct": "application/x-mascot", + ".mdb": "application/x-msaccess", + ".mdz": "audio/x-mod", + ".mel": "text/x-vmel", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mi": "application/x-mif", + ".mil": "image/x-cals", + ".mio": "audio/x-mio", + ".mmf": "application/x-skt-lbs", + ".mng": "video/x-mng", + ".mny": "application/x-msmoney", + ".moc": "application/x-mocha", + ".mocha": "application/x-mocha", + ".mod": "audio/x-mod", + ".mof": "application/x-yumekara", + ".mol": "chemical/x-mdl-molfile", + ".mop": "chemical/x-mopac-input", + ".mpa": "video/mpeg", + ".mpc": "application/vnd.mpohun.certificate", + ".mpg4": "video/mp4", + ".mpn": "application/vnd.mophun.application", + ".mpp": "application/vnd.ms-project", + 
".mps": "application/x-mapserver", + ".mpv2": "video/mpeg", + ".mrl": "text/x-mrml", + ".mrm": "application/x-mrm", + ".msg": "application/vnd.ms-outlook", + ".mts": "application/metastream", + ".mtx": "application/metastream", + ".mtz": "application/metastream", + ".mvb": "application/x-msmediaview", + ".mzv": "application/metastream", + ".nar": "application/zip", + ".nbmp": "image/nbmp", + ".ndb": "x-lml/x-ndb", + ".ndwn": "application/ndwn", + ".nif": "application/x-nif", + ".nmz": "application/x-scream", + ".nokia-op-logo": "image/vnd.nok-oplogo-color", + ".npx": "application/x-netfpx", + ".nsnd": "audio/nsnd", + ".nva": "application/x-neva1", + ".nws": "message/rfc822", + ".oom": "application/x-AtlasMate-Plugin", + ".p10": "application/pkcs10", + ".p12": "application/x-pkcs12", + ".p7b": "application/x-pkcs7-certificates", + ".p7c": "application/x-pkcs7-mime", + ".p7m": "application/x-pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/x-pkcs7-signature", + ".pac": "audio/x-pac", + ".pae": "audio/x-epac", + ".pan": "application/x-pan", + ".pcx": "image/x-pcx", + ".pda": "image/x-pda", + ".pfr": "application/font-tdpfr", + ".pfx": "application/x-pkcs12", + ".pko": "application/ynd.ms-pkipko", + ".pm": "application/x-perl", + ".pma": "application/x-perfmon", + ".pmc": "application/x-perfmon", + ".pmd": "application/x-pmd", + ".pml": "application/x-perfmon", + ".pmr": "application/x-perfmon", + ".pmw": "application/x-perfmon", + ".pnz": "image/png", + ".pot,": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pqf": "application/x-cprplayer", + ".pqi": "application/cprplayer", + ".prc": "application/x-prc", + ".prf": "application/pics-rules", + ".prop": "text/plain", + ".proxy": "application/x-ns-proxy-autoconfig", + ".ptlk": "application/listenup", + ".pub": "application/x-mspublisher", + ".pvx": "video/x-pv-pvx", + ".qcp": "audio/vnd.qcelp", + ".r3t": "text/vnd.rn-realtext3d", + ".rar": "application/octet-stream", + ".rc": "text/plain", + ".rf": "image/vnd.rn-realflash", + ".rlf": "application/x-richlink", + ".rmf": "audio/x-rmf", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmvb": "audio/x-pn-realaudio", + ".rnx": "application/vnd.rn-realplayer", + ".rp": "image/vnd.rn-realpix", + ".rt": "text/vnd.rn-realtext", + ".rte": "x-lml/x-gps", + ".rtg": "application/metastream", + ".rv": "video/vnd.rn-realvideo", + ".rwc": "application/x-rogerwilco", + ".s3m": "audio/x-mod", + ".s3z": "audio/x-mod", + ".sca": "application/x-supercard", + ".scd": "application/x-msschedule", + ".sct": "text/scriptlet", + ".sdf": "application/e-score", + ".sea": "application/x-stuffit", + ".setpay": "application/set-payment-initiation", + ".setreg": "application/set-registration-initiation", + ".shtml": "text/html", + ".shtm": "text/html", + ".shw": "application/presentations", + ".si6": "image/si6", + ".si7": "image/vnd.stiwap.sis", + ".si9": "image/vnd.lgtwap.sis", + ".slc": "application/x-salsa", + ".smd": "audio/x-smd", + ".smp": "application/studiom", + ".smz": "audio/x-smd", + ".spc": "application/x-pkcs7-certificates", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".sdp": "application/sdp", + ".spt": "application/x-spt", + ".sst": "application/vnd.ms-pkicertstore", + ".stk": "application/hyperstudio", + ".stl": "application/vnd.ms-pkistl", + ".stm": "text/html", + ".svf": "image/vnd", + ".svh": "image/svh", + ".svr": "x-world/x-svr", + ".swfl": "application/x-shockwave-flash", + ".tad": 
"application/octet-stream", + ".talk": "text/x-speech", + ".taz": "application/x-tar", + ".tbp": "application/x-timbuktu", + ".tbt": "application/x-timbuktu", + ".tgz": "application/x-compressed", + ".thm": "application/vnd.eri.thm", + ".tki": "application/x-tkined", + ".tkined": "application/x-tkined", + ".toc": "application/toc", + ".toy": "image/toy", + ".trk": "x-lml/x-gps", + ".trm": "application/x-msterminal", + ".tsi": "audio/tsplayer", + ".tsp": "application/dsptype", + ".ttf": "application/octet-stream", + ".ttz": "application/t-time", + ".uls": "text/iuls", + ".ult": "audio/x-mod", + ".uu": "application/x-uuencode", + ".uue": "application/x-uuencode", + ".vcf": "text/x-vcard", + ".vdo": "video/vdo", + ".vib": "audio/vib", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".vmi": "application/x-dreamcast-vms-info", + ".vms": "application/x-dreamcast-vms", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq", + ".vre": "x-world/x-vream", + ".vrt": "x-world/x-vrt", + ".vrw": "x-world/x-vream", + ".vts": "workbook/formulaone", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".web": "application/vnd.xara", + ".wi": "image/wavelet", + ".wis": "application/x-InstallShield", + ".wks": "application/vnd.ms-works", + ".wmd": "application/x-ms-wmd", + ".wmf": "application/x-msmetafile", + ".wmlscript": "text/vnd.wap.wmlscript", + ".wmz": "application/x-ms-wmz", + ".wpng": "image/x-up-wpng", + ".wps": "application/vnd.ms-works", + ".wpt": "x-lml/x-gps", + ".wri": "application/x-mswrite", + ".wrz": "x-world/x-vrml", + ".ws": "text/vnd.wap.wmlscript", + ".wsc": "application/vnd.wap.wmlscriptc", + ".wv": "video/wavelet", + ".wxl": "application/x-wxl", + ".x-gzip": "application/x-gzip", + ".xaf": "x-world/x-vrml", + ".xar": "application/vnd.xara", + ".xdm": "application/x-xdma", + ".xdma": "application/x-xdma", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xhtm": "application/xhtml+xml", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xll": "application/x-excel", + ".xlm": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xm": "audio/x-mod", + ".xmz": "audio/x-mod", + ".xof": "x-world/x-vrml", + ".xpi": "application/x-xpinstall", + ".xsit": "text/xml", + ".yz1": "application/x-yz1", + ".z": "application/x-compress", + ".zac": "application/x-zaurus-zac", + ".json": "application/json", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// The result is used for the HTTP header Content-Type.
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 0000000..b0b4a50
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,69 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response defines the HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+// Close closes the HTTP response body
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
+// PutObjectRequest is the request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 0000000..56ed8ca
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,474 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// CopyFile copies an object using multipart copy.
+//
+// srcBucketName    source bucket name
+// srcObjectKey     source object name
+// destObjectKey    target object name in the destination bucket (the bucket this method is called on)
+// partSize         the part size in bytes
+// options          object's constraints. Check out function InitiateMultipartUpload.
+//
+// error            it's nil if the operation succeeds, otherwise it's an error object.
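+//
+// Example (illustrative; bucket and object names are hypothetical):
+//
+//	err := bucket.CopyFile("src-bucket", "src-key", "dest-key", 10*1024*1024,
+//		oss.Routines(3), oss.Checkpoint(true, "/tmp/copy.cp"))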
+// +func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error { + destBucketName := bucket.BucketName + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (1024KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId) + if cpFilePath != "" { + return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines) + } + } + + return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey, + partSize, options, routines) +} + +func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + cpFileName := getCpFileName(src, dest, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- Concurrently copy without checkpoint --------- + +// copyWorkerArg defines the copy worker arguments +type copyWorkerArg struct { + bucket *Bucket + imur InitiateMultipartUploadResult + srcBucketName string + srcObjectKey string + options []Option + hook copyPartHook +} + +// copyPartHook is the hook for testing purpose +type copyPartHook func(part copyPart) error + +var copyPartHooker copyPartHook = defaultCopyPartHook + +func defaultCopyPartHook(part copyPart) error { + return nil +} + +// copyWorker copies worker +func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(chunk); err != nil { + failed <- err + break + } + chunkSize := chunk.End - chunk.Start + 1 + part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, + chunk.Start, chunkSize, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// copyScheduler +func copyScheduler(jobs chan copyPart, parts []copyPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// copyPart structure +type copyPart struct { + Number int // Part number (from 1 to 10,000) + Start int64 // The start index in the source file. 
+ End int64 // The end index in the source file
+}
+
+// getCopyParts calculates the copy parts
+func getCopyParts(objectSize, partSize int64) []copyPart {
+ parts := []copyPart{}
+ part := copyPart{}
+ i := 0
+ for offset := int64(0); offset < objectSize; offset += partSize {
+ part.Number = i + 1
+ part.Start = offset
+ part.End = GetPartEnd(offset, objectSize, partSize)
+ parts = append(parts, part)
+ i++
+ }
+ return parts
+}
+
+// getSrcObjectBytes gets the source file size
+func getSrcObjectBytes(parts []copyPart) int64 {
+ var ob int64
+ for _, part := range parts {
+ ob += (part.End - part.Start + 1)
+ }
+ return ob
+}
+
+// copyFile is a concurrent copy without checkpoint
+func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+ partSize int64, options []Option, routines int) error {
+ descBucket, err := bucket.Client.Bucket(destBucketName)
+ srcBucket, err := bucket.Client.Bucket(srcBucketName)
+ listener := GetProgressListener(options)
+
+ // choose the valid options for each stage
+ headerOptions := ChoiceHeadObjectOption(options)
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+ abortOptions := ChoiceAbortPartOption(options)
+
+ meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
+ if err != nil {
+ return err
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ // Get copy parts
+ parts := getCopyParts(objectSize, partSize)
+ // Initialize the multipart upload
+ imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
+ if err != nil {
+ return err
+ }
+
+ jobs := make(chan copyPart, len(parts))
+ results := make(chan UploadPart, len(parts))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ var completedBytes int64
+ totalBytes := getSrcObjectBytes(parts)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+ publishProgress(listener, event)
+
+ // Start the copy workers
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
+ for w := 1; w <= routines; w++ {
+ go copyWorker(w, arg, jobs, results, failed, die)
+ }
+
+ // Start the scheduler
+ go copyScheduler(jobs, parts)
+
+ // Wait for the parts to finish
+ completed := 0
+ ups := make([]UploadPart, len(parts))
+ for completed < len(parts) {
+ select {
+ case part := <-results:
+ completed++
+ ups[part.PartNumber-1] = part
+ copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+ completedBytes += copyBytes
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
+ publishProgress(listener, event)
+ case err := <-failed:
+ close(die)
+ descBucket.AbortMultipartUpload(imur, abortOptions...)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+ publishProgress(listener, event)
+ return err
+ }
+
+ if completed >= len(parts) {
+ break
+ }
+ }
+
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+ publishProgress(listener, event)
+
+ // Complete the multipart upload
+ _, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
+ if err != nil {
+ bucket.AbortMultipartUpload(imur, abortOptions...)
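+ // the abort error is deliberately dropped here; the CompleteMultipartUpload
+ // error is the one worth returning to the caller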
+ return err
+ }
+ return nil
+}
+
+// ----- Concurrent copy with checkpoint -----
+
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+type copyCheckpoint struct {
+ Magic string // Magic
+ MD5 string // CP content MD5
+ SrcBucketName string // Source bucket
+ SrcObjectKey string // Source object
+ DestBucketName string // Target bucket
+ DestObjectKey string // Target object
+ CopyID string // Copy ID
+ ObjStat objectStat // Object stat
+ Parts []copyPart // Copy parts
+ CopyParts []UploadPart // The uploaded parts
+ PartStat []bool // The part status
+}
+
+// isValid checks whether the CP data is valid: the CP itself is intact and the object has not been updated.
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
+ // Compare CP's magic number and the MD5.
+ cpb := cp
+ cpb.MD5 = ""
+ js, _ := json.Marshal(cpb)
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+ if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+ return false, nil
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+ if err != nil {
+ return false, err
+ }
+
+ // Compare the object size, last modified time and etag.
+ if cp.ObjStat.Size != objectSize ||
+ cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+ cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// load loads from the checkpoint file
+func (cp *copyCheckpoint) load(filePath string) error {
+ contents, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(contents, cp)
+ return err
+}
+
+// update updates the parts status
+func (cp *copyCheckpoint) update(part UploadPart) {
+ cp.CopyParts[part.PartNumber-1] = part
+ cp.PartStat[part.PartNumber-1] = true
+}
+
+// dump dumps the CP to the file
+func (cp *copyCheckpoint) dump(filePath string) error {
+ bcp := *cp
+
+ // Calculate MD5
+ bcp.MD5 = ""
+ js, err := json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+ bcp.MD5 = b64
+
+ // Serialization
+ js, err = json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+
+ // Dump
+ return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts returns the unfinished parts
+func (cp copyCheckpoint) todoParts() []copyPart {
+ dps := []copyPart{}
+ for i, ps := range cp.PartStat {
+ if !ps {
+ dps = append(dps, cp.Parts[i])
+ }
+ }
+ return dps
+}
+
+// getCompletedBytes returns the count of finished bytes
+func (cp copyCheckpoint) getCompletedBytes() int64 {
+ var completedBytes int64
+ for i, part := range cp.Parts {
+ if cp.PartStat[i] {
+ completedBytes += (part.End - part.Start + 1)
+ }
+ }
+ return completedBytes
+}
+
+// prepare initializes the multipart upload
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+ partSize int64, options []Option) error {
+ // CP
+ cp.Magic = copyCpMagic
+ cp.SrcBucketName = srcBucket.BucketName
+ cp.SrcObjectKey = srcObjectKey
+ cp.DestBucketName = destBucket.BucketName
+ cp.DestObjectKey = destObjectKey
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ cp.ObjStat.Size = objectSize
+ cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+ cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+ // Parts
+ cp.Parts = getCopyParts(objectSize, partSize)
+ cp.PartStat = make([]bool, len(cp.Parts))
+ for i := range cp.PartStat {
+ cp.PartStat[i] = false
+ }
+ cp.CopyParts = make([]UploadPart, len(cp.Parts))
+
+ // Init copy
+ imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
+ if err != nil {
+ return err
+ }
+ cp.CopyID = imur.UploadID
+
+ return nil
+}
+
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
+ imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
+ Key: cp.DestObjectKey, UploadID: cp.CopyID}
+ _, err := bucket.CompleteMultipartUpload(imur, parts, options...)
+ if err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ return err
+}
+
+// copyFileWithCp is a concurrent copy with checkpoint
+func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+ partSize int64, options []Option, cpFilePath string, routines int) error {
+ descBucket, err := bucket.Client.Bucket(destBucketName)
+ srcBucket, err := bucket.Client.Bucket(srcBucketName)
+ listener := GetProgressListener(options)
+
+ // Load CP data
+ ccp := copyCheckpoint{}
+ err = ccp.load(cpFilePath)
+ if err != nil {
+ os.Remove(cpFilePath)
+ }
+
+ // choose the valid options for each stage
+ headerOptions := ChoiceHeadObjectOption(options)
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+
+ meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
+ if err != nil {
+ return err
+ }
+
+ // Reinitialize when loading failed or the CP data is invalid
+ valid, err := ccp.isValid(meta)
+ if err != nil || !valid {
+ if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ }
+
+ // Unfinished parts
+ parts := ccp.todoParts()
+ imur := InitiateMultipartUploadResult{
+ Bucket: destBucketName,
+ Key: destObjectKey,
+ UploadID: ccp.CopyID}
+
+ jobs := make(chan copyPart, len(parts))
+ results := make(chan UploadPart, len(parts))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ completedBytes := ccp.getCompletedBytes()
+ event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
+ publishProgress(listener, event)
+
+ // Start the worker goroutines
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
+ for w := 1; w <= routines; w++ {
+ go copyWorker(w, arg, jobs, results, failed, die)
+ }
+
+ // Start the scheduler
+ go copyScheduler(jobs, parts)
+
+ // Wait for the parts to complete
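+ // Each finished part updates the checkpoint file below, so an interrupted
+ // copy can resume from the last dumped state instead of starting over.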
+ completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + ccp.update(part) + ccp.dump(cpFilePath) + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + + return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go new file mode 100644 index 0000000..9e71419 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go @@ -0,0 +1,305 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" + "net/url" + "os" + "sort" + "strconv" +) + +// InitiateMultipartUpload initializes multipart upload +// +// objectKey object name +// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, Meta, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html +// +// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) { + var imur InitiateMultipartUploadResult + opts := AddContentType(options, objectKey) + params, _ := GetRawParams(options) + paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"} + ConvertEmptyValueToNil(params, paramKeys) + params["uploads"] = nil + + resp, err := bucket.do("POST", objectKey, params, opts, nil, nil) + if err != nil { + return imur, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &imur) + return imur, err +} + +// UploadPart uploads parts +// +// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts. +// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file. +// And thus with the same part number and upload Id, another part upload will overwrite the data. +// Except the last one, minimal part size is 100KB. There's no limit on the last part size. +// +// imur the returned value of InitiateMultipartUpload. +// reader io.Reader the reader for the part's data. +// size the part size. +// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error. +// +// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
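+//
+// A hypothetical sketch of the manual flow (data and object key are made up):
+//
+//    imur, _ := bucket.InitiateMultipartUpload("object-key")
+//    part, _ := bucket.UploadPart(imur, bytes.NewReader(data), int64(len(data)), 1)
+//    _, err := bucket.CompleteMultipartUpload(imur, []UploadPart{part})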
+// +func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader, + partSize int64, partNumber int, options ...Option) (UploadPart, error) { + request := &UploadPartRequest{ + InitResult: &imur, + Reader: reader, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// UploadPartFromFile uploads part from the file. +// +// imur the return value of a successful InitiateMultipartUpload. +// filePath the local file path to upload. +// startPosition the start position in the local file. +// partSize the part size. +// partNumber the part number (from 1 to 10,000) +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var part = UploadPart{} + fd, err := os.Open(filePath) + if err != nil { + return part, err + } + defer fd.Close() + fd.Seek(startPosition, os.SEEK_SET) + + request := &UploadPartRequest{ + InitResult: &imur, + Reader: fd, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// DoUploadPart does the actual part upload. +// +// request part upload request +// +// UploadPartResult the result of uploading part. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) { + listener := GetProgressListener(options) + options = append(options, ContentLength(request.PartSize)) + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(request.PartNumber) + params["uploadId"] = request.InitResult.UploadID + resp, err := bucket.do("PUT", request.InitResult.Key, params, options, + &io.LimitedReader{R: request.Reader, N: request.PartSize}, listener) + if err != nil { + return &UploadPartResult{}, err + } + defer resp.Body.Close() + + part := UploadPart{ + ETag: resp.Headers.Get(HTTPHeaderEtag), + PartNumber: request.PartNumber, + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoUploadPart") + if err != nil { + return &UploadPartResult{part}, err + } + } + + return &UploadPartResult{part}, nil +} + +// UploadPartCopy uploads part copy +// +// imur the return value of InitiateMultipartUpload +// copySrc source Object name +// startPosition the part's start index in the source file +// partSize the part size +// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error. +// options the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error. +// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. 
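+//
+// A hypothetical sketch that copies the first 5MB of a source object as part 1
+// (names and sizes are made up):
+//
+//    part, err := bucket.UploadPartCopy(imur, "src-bucket", "src-object",
+//        0, 5*1024*1024, 1)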
+// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + var opts []Option + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)), + CopySourceRange(startPosition, partSize)} + } else { + opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)), + CopySourceRange(startPosition, partSize)} + options = DeleteOption(options, versionIdKey) + } + + opts = append(opts, options...) + + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// CompleteMultipartUpload completes the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy. +// +// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(UploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AbortMultipartUpload aborts the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// ListUploadedParts lists the uploaded parts. +// +// imur the return value of InitiateMultipartUpload. +// +// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
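+//
+// A hypothetical sketch that inspects which parts already reached OSS (the
+// UploadedParts field and its members are assumed from type.go):
+//
+//    lsRes, err := bucket.ListUploadedParts(imur)
+//    if err == nil {
+//        for _, p := range lsRes.UploadedParts {
+//            fmt.Println(p.PartNumber, p.ETag)
+//        }
+//    }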
+// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + options = append(options, EncodingType("url")) + + params := map[string]interface{}{} + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListUploadedPartsResult(&out) + return out, err +} + +// ListMultipartUploads lists all ongoing multipart upload tasks +// +// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order; +// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys. +// +// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 0000000..ccae9f4 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,689 @@ +package oss + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL parameter + optionHTTP optionType = "HTTPHeader" // HTTP header + optionArg optionType = "FuncArgument" // Function argument +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" + responseHeader = "x-response-header" + redundancyType = "redundancy-type" + objectHashFunc = "object-hash-func" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option HTTP option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { + return 
setHeader(HTTPHeaderContentDisposition, value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) Option { + return setHeader(HTTPHeaderContentLanguage, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) +} + +// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048 +func NormalizedRange(nr string) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr))) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) Option { + return setHeader(HTTPHeaderAcceptEncoding, value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) Option { + return setHeader(HTTPHeaderIfMatch, value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) Option { + return setHeader(HTTPHeaderIfNoneMatch, value) +} + +// CopySource is an option to set X-Oss-Copy-Source header +func CopySource(sourceBucket, sourceObject string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject) +} + +// CopySourceVersion is an option to set X-Oss-Copy-Source header,include versionId +func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId) +} + +// CopySourceRange is an option to set X-Oss-Copy-Source header +func CopySourceRange(startPosition, partSize int64) Option { + val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + + strconv.FormatInt((startPosition+partSize-1), 10) + return setHeader(HTTPHeaderOssCopySourceRange, val) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfMatch, value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) +} + +// CopySourceIfUnmodifiedSince is an option to set 
X-Oss-Copy-Source-If-Unmodified-Since header +func CopySourceIfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// MetadataDirective is an option to set X-Oss-Metadata-Directive header +func MetadataDirective(directive MetadataDirectiveType) Option { + return setHeader(HTTPHeaderOssMetadataDirective, string(directive)) +} + +// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header +func ServerSideEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryption, value) +} + +// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header +func ServerSideEncryptionKeyID(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value) +} + +// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header +func ServerSideDataEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideDataEncryption, value) +} + +// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header +func SSECAlgorithm(value string) Option { + return setHeader(HTTPHeaderSSECAlgorithm, value) +} + +// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header +func SSECKey(value string) Option { + return setHeader(HTTPHeaderSSECKey, value) +} + +// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header +func SSECKeyMd5(value string) Option { + return setHeader(HTTPHeaderSSECKeyMd5, value) +} + +// ObjectACL is an option to set X-Oss-Object-Acl header +func ObjectACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssObjectACL, string(acl)) +} + +// symlinkTarget is an option to set X-Oss-Symlink-Target +func symlinkTarget(targetObjectKey string) Option { + return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey) +} + +// Origin is an option to set Origin header +func Origin(value string) Option { + return setHeader(HTTPHeaderOrigin, value) +} + +// ObjectStorageClass is an option to set the storage class of object +func ObjectStorageClass(storageClass StorageClassType) Option { + return setHeader(HTTPHeaderOssStorageClass, string(storageClass)) +} + +// Callback is an option to set callback values +func Callback(callback string) Option { + return setHeader(HTTPHeaderOssCallback, callback) +} + +// CallbackVar is an option to set callback user defined values +func CallbackVar(callbackVar string) Option { + return setHeader(HTTPHeaderOssCallbackVar, callbackVar) +} + +// RequestPayer is an option to set payer who pay for the request +func RequestPayer(payerType PayerType) Option { + return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType))) +} + +// RequestPayerParam is an option to set payer who pay for the request +func RequestPayerParam(payerType PayerType) Option { + return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType))) +} + +// SetTagging is an option to set object tagging +func SetTagging(tagging Tagging) Option { + if len(tagging.Tags) == 0 { + return nil + } + + taggingValue := "" + for index, tag := range tagging.Tags { + if index != 0 { + taggingValue += "&" + } + taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value) + } + return setHeader(HTTPHeaderOssTagging, taggingValue) +} + +// TaggingDirective is an option to set X-Oss-Metadata-Directive header +func TaggingDirective(directive TaggingDirectiveType) Option { + return 
setHeader(HTTPHeaderOssTaggingDirective, string(directive)) +} + +// ACReqMethod is an option to set Access-Control-Request-Method header +func ACReqMethod(value string) Option { + return setHeader(HTTPHeaderACReqMethod, value) +} + +// ACReqHeaders is an option to set Access-Control-Request-Headers header +func ACReqHeaders(value string) Option { + return setHeader(HTTPHeaderACReqHeaders, value) +} + +// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit +func TrafficLimitHeader(value int64) Option { + return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10)) +} + +// UserAgentHeader is an option to set HTTPHeaderUserAgent +func UserAgentHeader(ua string) Option { + return setHeader(HTTPHeaderUserAgent, ua) +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func ForbidOverWrite(forbidWrite bool) Option { + if forbidWrite { + return setHeader(HTTPHeaderOssForbidOverWrite, "true") + } else { + return setHeader(HTTPHeaderOssForbidOverWrite, "false") + } +} + +// RangeBehavior is an option to set Range value, such as "standard" +func RangeBehavior(value string) Option { + return setHeader(HTTPHeaderOssRangeBehavior, value) +} + +func PartHashCtxHeader(value string) Option { + return setHeader(HTTPHeaderOssHashCtx, value) +} + +func PartMd5CtxHeader(value string) Option { + return setHeader(HTTPHeaderOssMd5Ctx, value) +} + +func PartHashCtxParam(value string) Option { + return addParam("x-oss-hash-ctx", value) +} + +func PartMd5CtxParam(value string) Option { + return addParam("x-oss-md5-ctx", value) +} + +// Delimiter is an option to set delimiler parameter +func Delimiter(value string) Option { + return addParam("delimiter", value) +} + +// Marker is an option to set marker parameter +func Marker(value string) Option { + return addParam("marker", value) +} + +// MaxKeys is an option to set maxkeys parameter +func MaxKeys(value int) Option { + return addParam("max-keys", strconv.Itoa(value)) +} + +// Prefix is an option to set prefix parameter +func Prefix(value string) Option { + return addParam("prefix", value) +} + +// EncodingType is an option to set encoding-type parameter +func EncodingType(value string) Option { + return addParam("encoding-type", value) +} + +// MaxUploads is an option to set max-uploads parameter +func MaxUploads(value int) Option { + return addParam("max-uploads", strconv.Itoa(value)) +} + +// KeyMarker is an option to set key-marker parameter +func KeyMarker(value string) Option { + return addParam("key-marker", value) +} + +// VersionIdMarker is an option to set version-id-marker parameter +func VersionIdMarker(value string) Option { + return addParam("version-id-marker", value) +} + +// VersionId is an option to set versionId parameter +func VersionId(value string) Option { + return addParam("versionId", value) +} + +// TagKey is an option to set tag key parameter +func TagKey(value string) Option { + return addParam("tag-key", value) +} + +// TagValue is an option to set tag value parameter +func TagValue(value string) Option { + return addParam("tag-value", value) +} + +// UploadIDMarker is an option to set upload-id-marker parameter +func UploadIDMarker(value string) Option { + return addParam("upload-id-marker", value) +} + +// MaxParts is an option to set max-parts parameter +func MaxParts(value int) Option { + return addParam("max-parts", strconv.Itoa(value)) +} + +// PartNumberMarker is an option to set part-number-marker parameter +func PartNumberMarker(value int) Option { + return addParam("part-number-marker", 
strconv.Itoa(value))
+}
+
+// Sequential is an option to set the sequential parameter for InitiateMultipartUpload
+func Sequential() Option {
+ return addParam("sequential", "")
+}
+
+// WithHashContext is an option to set the withHashContext parameter for InitiateMultipartUpload
+func WithHashContext() Option {
+ return addParam("withHashContext", "")
+}
+
+// EnableMd5 is an option to set the x-oss-enable-md5 parameter for InitiateMultipartUpload
+func EnableMd5() Option {
+ return addParam("x-oss-enable-md5", "")
+}
+
+// EnableSha1 is an option to set the x-oss-enable-sha1 parameter for InitiateMultipartUpload
+func EnableSha1() Option {
+ return addParam("x-oss-enable-sha1", "")
+}
+
+// EnableSha256 is an option to set the x-oss-enable-sha256 parameter for InitiateMultipartUpload
+func EnableSha256() Option {
+ return addParam("x-oss-enable-sha256", "")
+}
+
+// ListType is an option to set the list-type parameter for ListObjectsV2
+func ListType(value int) Option {
+ return addParam("list-type", strconv.Itoa(value))
+}
+
+// StartAfter is an option to set the start-after parameter for ListObjectsV2
+func StartAfter(value string) Option {
+ return addParam("start-after", value)
+}
+
+// ContinuationToken is an option to set the continuation-token parameter for ListObjectsV2
+func ContinuationToken(value string) Option {
+ if value == "" {
+ return addParam("continuation-token", nil)
+ }
+ return addParam("continuation-token", value)
+}
+
+// FetchOwner is an option to set the fetch-owner parameter for ListObjectsV2
+func FetchOwner(value bool) Option {
+ if value {
+ return addParam("fetch-owner", "true")
+ }
+ return addParam("fetch-owner", "false")
+}
+
+// DeleteObjectsQuiet false: DeleteObjects runs in verbose mode; true: DeleteObjects runs in quiet mode. Default is false.
+func DeleteObjectsQuiet(isQuiet bool) Option {
+ return addArg(deleteObjectsQuiet, isQuiet)
+}
+
+// StorageClass sets the bucket storage class
+func StorageClass(value StorageClassType) Option {
+ return addArg(storageClass, value)
+}
+
+// RedundancyType sets the bucket data redundancy type
+func RedundancyType(value DataRedundancyType) Option {
+ return addArg(redundancyType, value)
+}
+
+// ObjectHashFunc sets the hash function used to compute the object hash
+func ObjectHashFunc(value ObjecthashFuncType) Option {
+ return addArg(objectHashFunc, value)
+}
+
+// Checkpoint configuration
+type cpConfig struct {
+ IsEnable bool
+ FilePath string
+ DirPath string
+}
+
+// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
+func Checkpoint(isEnable bool, filePath string) Option {
+ return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
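+// When only a directory is given, the checkpoint file name is derived from the
+// transfer's source and destination so parallel transfers do not collide.
+// A hypothetical call (the DownloadFile signature is assumed from download.go):
+//
+//    err := bucket.DownloadFile("object-key", "local.file", 100*1024,
+//        Routines(3), CheckpointDir(true, "/tmp/cp"))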
+func CheckpointDir(isEnable bool, dirPath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath}) +} + +// Routines DownloadFile/UploadFile routine count +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC Init AppendObject CRC +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress set progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// GetResponseHeader for get response http header +func GetResponseHeader(respHeader *http.Header) Option { + return addArg(responseHeader, respHeader) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set x-oss-process param +func Process(value string) Option { + return addParam("x-oss-process", value) +} + +// TrafficLimitParam is a option to set x-oss-traffic-limit +func TrafficLimitParam(value int64) Option { + return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10)) +} + +// SetHeader Allow users to set personalized http headers +func SetHeader(key string, value interface{}) Option { + return setHeader(key, value) +} + +// AddParam Allow users to set personalized http params +func AddParam(key string, value interface{}) Option { + return addParam(key, value) +} + +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func GetRawParams(options []Option) (map[string]interface{}, error) { + // Option + params := map[string]optionValue{} + for _, option := range options { + if option != 
nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // Serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +} + +func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := params[param]; ok { + return val.Value, nil + } + return defaultVal, nil +} + +func IsOptionSet(options []Option, option string) (bool, interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return false, nil, err + } + } + } + + if val, ok := params[option]; ok { + return true, val.Value, nil + } + return false, nil, nil +} + +func DeleteOption(options []Option, strKey string) []Option { + var outOption []Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + option(params) + _, exist := params[strKey] + if !exist { + outOption = append(outOption, option) + } else { + delete(params, strKey) + } + } + } + return outOption +} + +func GetRequestId(header http.Header) string { + return header.Get("x-oss-request-id") +} + +func GetVersionId(header http.Header) string { + return header.Get("x-oss-version-id") +} + +func GetCopySrcVersionId(header http.Header) string { + return header.Get("x-oss-copy-source-version-id") +} + +func GetDeleteMark(header http.Header) bool { + value := header.Get("x-oss-delete-marker") + if strings.ToUpper(value) == "TRUE" { + return true + } + return false +} + +func GetQosDelayTime(header http.Header) string { + return header.Get("x-oss-qos-delay-time") +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func AllowSameActionOverLap(enabled bool) Option { + if enabled { + return setHeader(HTTPHeaderAllowSameActionOverLap, "true") + } else { + return setHeader(HTTPHeaderAllowSameActionOverLap, "false") + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go new file mode 100644 index 0000000..9f3aa9f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go @@ -0,0 +1,116 @@ +package oss + +import ( + "io" +) + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType ProgressEventType +} + +// ProgressListener listens progress change +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func 
publishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +type readerTracker struct { + completedBytes int64 +} + +type teeReader struct { + reader io.Reader + writer io.Writer + listener ProgressListener + consumedBytes int64 + totalBytes int64 + tracker *readerTracker +} + +// TeeReader returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser { + return &teeReader{ + reader: reader, + writer: writer, + listener: listener, + consumedBytes: 0, + totalBytes: totalBytes, + tracker: tracker, + } +} + +func (t *teeReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + + // Read encountered error + if err != nil && err != io.EOF { + event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0) + publishProgress(t.listener, event) + } + + if n > 0 { + t.consumedBytes += int64(n) + // CRC + if t.writer != nil { + if n, err := t.writer.Write(p[:n]); err != nil { + return n, err + } + } + // Progress + if t.listener != nil { + event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n)) + publishProgress(t.listener, event) + } + // Track + if t.tracker != nil { + t.tracker.completedBytes = t.consumedBytes + } + } + + return +} + +func (t *teeReader) Close() error { + if rc, ok := t.reader.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go new file mode 100644 index 0000000..d09bc5e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go @@ -0,0 +1,11 @@ +// +build !go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward + +func disableHTTPRedirect(client *http.Client) { + +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go new file mode 100644 index 0000000..5b0bb86 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward +func disableHTTPRedirect(client *http.Client) { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go new file mode 100644 index 0000000..2e0da46 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go @@ -0,0 +1,197 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// CreateSelectCsvObjectMeta is Creating csv object meta +// +// key the object key. +// csvMeta the csv file meta +// options the options for create csv Meta of the object. +// +// MetaEndFrameCSV the csv file meta info +// error it's nil if no error, otherwise it's an error object. 
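+//
+// A hypothetical sketch (the object key is made up; the fields echoed come
+// from MetaEndFrameCSV):
+//
+//    metaRes, err := bucket.CreateSelectCsvObjectMeta("sample.csv", CsvMetaRequest{})
+//    if err == nil {
+//        fmt.Println(metaRes.RowsCount, metaRes.ColumnsCount, metaRes.SplitsCount)
+//    }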
+//
+func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
+ var endFrame MetaEndFrameCSV
+ params := map[string]interface{}{}
+ params["x-oss-process"] = "csv/meta"
+
+ csvMeta.encodeBase64()
+ bs, err := xml.Marshal(csvMeta)
+ if err != nil {
+ return endFrame, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return endFrame, err
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp)
+
+ return resp.Frame.MetaEndFrameCSV, err
+}
+
+// CreateSelectJsonObjectMeta creates the json object meta
+//
+// key the object key.
+// jsonMeta the json file meta
+// options the options for creating the json meta of the object.
+//
+// MetaEndFrameJSON the json file meta info
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
+ var endFrame MetaEndFrameJSON
+ params := map[string]interface{}{}
+ params["x-oss-process"] = "json/meta"
+
+ bs, err := xml.Marshal(jsonMeta)
+ if err != nil {
+ return endFrame, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return endFrame, err
+ }
+ defer resp.Body.Close()
+
+ _, err = ioutil.ReadAll(resp)
+
+ return resp.Frame.MetaEndFrameJSON, err
+}
+
+// SelectObject is the select object api; it supports csv and json files.
+//
+// key the object key.
+// selectReq the request data for select object
+// options the options for selecting the file of the object.
+//
+// io.ReadCloser reader instance for reading data from the response. The caller must call Close() after use; it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
+ params := map[string]interface{}{}
+ if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+ params["x-oss-process"] = "csv/select" // default: select a csv file
+ } else {
+ params["x-oss-process"] = "json/select"
+ }
+ selectReq.encodeBase64()
+ bs, err := xml.Marshal(selectReq)
+ if err != nil {
+ return nil, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+ resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+ if err != nil {
+ return nil, err
+ }
+ if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
+ resp.Frame.EnablePayloadCrc = true
+ }
+ resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"
+
+ return resp, err
+}
+
+// DoPostSelectObject is the SelectObject/CreateMeta api; it supports csv and json files.
+//
+// key the object key.
+// params the oss process params: csv/meta, json/meta, csv/select or json/select.
+// buf the request data as a buffer.
+// options the options for selecting the file of the object.
+//
+// SelectObjectResponse the response of select object.
+// error it's nil if no error, otherwise it's an error object.
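+//
+// Callers usually go through SelectObject or SelectObjectIntoFile instead. A
+// hypothetical SelectObject query over a CSV object (the Expression field is
+// assumed from SelectRequest):
+//
+//    req := SelectRequest{Expression: "select * from ossobject limit 10"}
+//    body, err := bucket.SelectObject("sample.csv", req)
+//    if err == nil {
+//        defer body.Close()
+//        io.Copy(os.Stdout, body)
+//    }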
+// +func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) { + resp, err := bucket.do("POST", key, params, options, buf, nil) + if err != nil { + return nil, err + } + + result := &SelectObjectResponse{ + Body: resp.Body, + StatusCode: resp.StatusCode, + Frame: SelectObjectResult{}, + } + result.Headers = resp.Headers + // result.Frame = SelectObjectResult{} + result.ReadTimeOut = bucket.GetConfig().Timeout + + // Progress + listener := GetProgressListener(options) + + // CRC32 + crcCalc := crc32.NewIEEE() + result.WriterForCheckCrc32 = crcCalc + result.Body = TeeReader(resp.Body, nil, 0, listener, nil) + + err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK}) + + return result, err +} + +// SelectObjectIntoFile is the selectObject to file api +// +// key the object key. +// fileName saving file's name to localstation. +// selectReq the request data for select object +// options the options for select file of the object. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error { + tempFilePath := fileName + TempFileSuffix + + params := map[string]interface{}{} + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + params["x-oss-process"] = "csv/select" // default select csv file + } else { + params["x-oss-process"] = "json/select" + } + selectReq.encodeBase64() + bs, err := xml.Marshal(selectReq) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return err + } + defer resp.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, resp) + fd.Close() + if err != nil { + return err + } + + return os.Rename(tempFilePath, fileName) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go new file mode 100644 index 0000000..8b75782 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go @@ -0,0 +1,364 @@ +package oss + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "time" +) + +// The adapter class for Select object's response. +// The response consists of frames. Each frame has the following format: + +// Type | Payload Length | Header Checksum | Payload | Payload Checksum + +// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes---------> +// And we have three kind of frames. 
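+// All integer fields in a frame are big-endian; bytesToInt at the bottom of
+// this file decodes them.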
+// Data Frame: +// Type:8388609 +// Payload: Offset | Data +// <-8 bytes> + +// Continuous Frame +// Type:8388612 +// Payload: Offset (8-bytes) + +// End Frame +// Type:8388613 +// Payload: Offset | total scanned bytes | http status code | error message +// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe---> + +// SelectObjectResponse defines HTTP response from OSS SelectObject +type SelectObjectResponse struct { + StatusCode int + Headers http.Header + Body io.ReadCloser + Frame SelectObjectResult + ReadTimeOut uint + ClientCRC32 uint32 + ServerCRC32 uint32 + WriterForCheckCrc32 hash.Hash32 + Finish bool +} + +func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) { + n, err = sr.readFrames(p) + return +} + +// Close http reponse body +func (sr *SelectObjectResponse) Close() error { + return sr.Body.Close() +} + +// PostSelectResult is the request of SelectObject +type PostSelectResult struct { + Response *SelectObjectResponse +} + +// readFrames is read Frame +func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) { + var nn int + var err error + var checkValid bool + if sr.Frame.OutputRawData == true { + nn, err = sr.Body.Read(p) + return nn, err + } + + if sr.Finish { + return 0, io.EOF + } + + for { + // if this Frame is Readed, then not reading Header + if sr.Frame.OpenLine != true { + err = sr.analysisHeader() + if err != nil { + return nn, err + } + } + + if sr.Frame.FrameType == DataFrameType { + n, err := sr.analysisData(p[nn:]) + if err != nil { + return nn, err + } + nn += n + + // if this Frame is readed all data, then empty the Frame to read it with next frame + if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + sr.emptyFrame() + } + + if nn == len(p) { + return nn, nil + } + } else if sr.Frame.FrameType == ContinuousFrameType { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + } else if sr.Frame.FrameType == EndFrameType { + err = sr.analysisEndFrame() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameCSVType { + err = sr.analysisMetaEndFrameCSV() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameJSONType { + err = sr.analysisMetaEndFrameJSON() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } + } + return nn, nil +} + +type chanReadIO struct { + readLen int + err error +} + +func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) { + r := sr.Body + ch := make(chan chanReadIO, 1) + defer close(ch) + go func(p []byte) { + var needReadLength int + readChan := chanReadIO{} + needReadLength = len(p) + for { + n, err := r.Read(p[readChan.readLen:needReadLength]) + readChan.readLen += n + if err != nil { + readChan.err = err + ch <- readChan + return + } + + if readChan.readLen == needReadLength { + break + } + } + ch <- readChan + }(p) + + select { + case <-time.After(time.Second * timeOut): + return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p)) + case result 
:= <-ch: + return result.readLen, result.err + } +} + +// analysisHeader is reading selectObject response body's header +func (sr *SelectObjectResponse) analysisHeader() error { + headFrameByte := make([]byte, 20) + _, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error()) + } + + frameTypeByte := headFrameByte[0:4] + sr.Frame.Version = frameTypeByte[0] + frameTypeByte[0] = 0 + bytesToInt(frameTypeByte, &sr.Frame.FrameType) + + if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType && + sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType { + return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType) + } + + payloadLengthByte := headFrameByte[4:8] + bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength) + headCheckSumByte := headFrameByte[8:12] + bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum) + byteOffset := headFrameByte[12:20] + bytesToInt(byteOffset, &sr.Frame.Offset) + sr.Frame.OpenLine = true + + err = sr.writerCheckCrc32(byteOffset) + return err +} + +// analysisData is reading the DataFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) { + var needReadLength int32 + lenP := int32(len(p)) + restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength + if lenP <= restByteLength { + needReadLength = lenP + } else { + needReadLength = restByteLength + } + n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut)) + if err != nil { + return n, fmt.Errorf("read frame data error,%s", err.Error()) + } + sr.Frame.ConsumedBytesLength += int32(n) + err = sr.writerCheckCrc32(p[:n]) + return n, err +} + +// analysisEndFrame is reading the EndFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisEndFrame() error { + var eF EndFrame + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read end frame error:%s", err.Error()) + } + bytesToInt(payLoadBytes[0:8], &eF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode) + errMsgLength := sr.Frame.PayloadLength - 20 + eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12]) + sr.Frame.EndFrame.TotalScanned = eF.TotalScanned + sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode + sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body +func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error { + var mCF MetaEndFrameCSV + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read meta end csv frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &mCF.Status) + bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount) + bytesToInt(payLoadBytes[16:24], &mCF.RowsCount) + bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount) + errMsgLength := sr.Frame.PayloadLength - 36 + mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28]) + sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg + 
+	sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
+	sr.Frame.MetaEndFrameCSV.Status = mCF.Status
+	sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
+	sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
+	sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
+	err = sr.writerCheckCrc32(payLoadBytes)
+	return err
+}
+
+// analysisMetaEndFrameJSON reads the payload of a MetaEndFrameJSONType frame from the response body
+func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
+	var mJF MetaEndFrameJSON
+	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
+	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
+	if err != nil {
+		return fmt.Errorf("read meta end json frame error: %s", err.Error())
+	}
+
+	bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
+	bytesToInt(payLoadBytes[8:12], &mJF.Status)
+	bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
+	bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
+	errMsgLength := sr.Frame.PayloadLength - 32
+	mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
+	sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
+	sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
+	sr.Frame.MetaEndFrameJSON.Status = mJF.Status
+	sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
+	sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount
+
+	err = sr.writerCheckCrc32(payLoadBytes)
+	return err
+}
+
+// checkPayloadSum reads the payload checksum and, when CRC checking is enabled,
+// compares the server-side CRC32 with the one computed on the client
+func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
+	payLoadChecksumByte := make([]byte, 4)
+	n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
+	if n == 4 {
+		bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
+		sr.ServerCRC32 = sr.Frame.PayloadChecksum
+		sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
+		if sr.Frame.EnablePayloadCrc && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
+			return false, fmt.Errorf("requestId: %s, payload checksum mismatch for frame type %d: client %d but server %d",
+				sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
+		}
+		return true, err
+	}
+	return false, fmt.Errorf("requestId: %s, read checksum error: %s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
+}
+
+func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
+	err = nil
+	if sr.Frame.EnablePayloadCrc {
+		_, err = sr.WriterForCheckCrc32.Write(p)
+	}
+	return err
+}
+
+// emptyFrame resets the SelectObjectResponse frame state before the next frame is read
+func (sr *SelectObjectResponse) emptyFrame() {
+	crcCalc := crc32.NewIEEE()
+	sr.WriterForCheckCrc32 = crcCalc
+	sr.Finish = false
+
+	sr.Frame.ConsumedBytesLength = 0
+	sr.Frame.OpenLine = false
+	sr.Frame.Version = byte(0)
+	sr.Frame.FrameType = 0
+	sr.Frame.PayloadLength = 0
+	sr.Frame.HeaderCheckSum = 0
+	sr.Frame.Offset = 0
+	sr.Frame.Data = ""
+
+	sr.Frame.EndFrame.TotalScanned = 0
+	sr.Frame.EndFrame.HTTPStatusCode = 0
+	sr.Frame.EndFrame.ErrorMsg = ""
+
+	sr.Frame.MetaEndFrameCSV.TotalScanned = 0
+	sr.Frame.MetaEndFrameCSV.Status = 0
+	sr.Frame.MetaEndFrameCSV.SplitsCount = 0
+	sr.Frame.MetaEndFrameCSV.RowsCount = 0
+	sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
+	sr.Frame.MetaEndFrameCSV.ErrorMsg = ""
+
+	sr.Frame.MetaEndFrameJSON.TotalScanned = 0
+	sr.Frame.MetaEndFrameJSON.Status = 0
+	sr.Frame.MetaEndFrameJSON.SplitsCount = 0
+	sr.Frame.MetaEndFrameJSON.RowsCount = 0
+	sr.Frame.MetaEndFrameJSON.ErrorMsg = ""
+
+	sr.Frame.PayloadChecksum = 0
+}
+
+// bytesToInt decodes a big-endian byte slice into the integer pointed to by ret
+func bytesToInt(b []byte, ret interface{}) {
+	binBuf := bytes.NewBuffer(b)
+	binary.Read(binBuf,
binary.BigEndian, ret) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go new file mode 100644 index 0000000..4fb8b17 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go new file mode 100644 index 0000000..40ae049 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go @@ -0,0 +1,44 @@ +// +build go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConns: httpMaxConns.MaxIdleConns, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + MaxConnsPerHost: httpMaxConns.MaxConnsPerHost, + IdleConnTimeout: httpTimeOut.IdleConnTimeout, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go new file mode 100644 index 0000000..e1d79d7 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go @@ -0,0 +1,1306 @@ +package oss + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "net/url" + "time" +) + +// ListBucketsResult defines the result object from ListBuckets request +type ListBucketsResult struct { + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Prefix string `xml:"Prefix"` // The prefix in this query + Marker string `xml:"Marker"` // The marker filter + MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true. + IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining buckets to return. 
+	NextMarker  string             `xml:"NextMarker"`     // The marker filter for the next list call
+	Owner       Owner              `xml:"Owner"`          // The owner information
+	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
+}
+
+// BucketProperties defines bucket properties
+type BucketProperties struct {
+	XMLName      xml.Name  `xml:"Bucket"`
+	Name         string    `xml:"Name"`         // Bucket name
+	Location     string    `xml:"Location"`     // Bucket datacenter
+	CreationDate time.Time `xml:"CreationDate"` // Bucket create time
+	StorageClass string    `xml:"StorageClass"` // Bucket storage class
+}
+
+// ListCloudBoxResult defines the result object from the ListCloudBoxes request
+type ListCloudBoxResult struct {
+	XMLName     xml.Name             `xml:"ListCloudBoxResult"`
+	Prefix      string               `xml:"Prefix"`              // The prefix in this query
+	Marker      string               `xml:"Marker"`              // The marker filter
+	MaxKeys     int                  `xml:"MaxKeys"`             // The max entry count to return. This information is returned when IsTruncated is true.
+	IsTruncated bool                 `xml:"IsTruncated"`         // Flag true means there are remaining cloud boxes to return
+	NextMarker  string               `xml:"NextMarker"`          // The marker filter for the next list call
+	Owner       string               `xml:"Owner>DisplayName"`   // The owner information
+	CloudBoxes  []CloudBoxProperties `xml:"CloudBoxes>CloudBox"` // The cloud box list
+}
+
+// CloudBoxProperties defines cloud box properties
+type CloudBoxProperties struct {
+	XMLName         xml.Name `xml:"CloudBox"`
+	ID              string   `xml:"ID"`
+	Name            string   `xml:"Name"`
+	Region          string   `xml:"Region"`
+	ControlEndpoint string   `xml:"ControlEndpoint"`
+	DataEndpoint    string   `xml:"DataEndpoint"`
+}
+
+// GetBucketACLResult defines GetBucketACL request's result
+type GetBucketACLResult struct {
+	XMLName xml.Name `xml:"AccessControlPolicy"`
+	ACL     string   `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner   Owner    `xml:"Owner"`                   // Bucket owner
+}
+
+// LifecycleConfiguration is the Bucket Lifecycle configuration
+type LifecycleConfiguration struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []LifecycleRule `xml:"Rule"`
+}
+
+// LifecycleRule defines Lifecycle rules
+type LifecycleRule struct {
+	XMLName              xml.Name                       `xml:"Rule"`
+	ID                   string                         `xml:"ID,omitempty"` // The rule ID
+	Prefix               string                         `xml:"Prefix"`       // The object key prefix
+	Status               string                         `xml:"Status"`       // The rule status (enabled or not)
+	Tags                 []Tag                          `xml:"Tag,omitempty"`                  // The tags property
+	Expiration           *LifecycleExpiration           `xml:"Expiration,omitempty"`           // The expiration property
+	Transitions          []LifecycleTransition          `xml:"Transition,omitempty"`           // The transition property
+	AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property
+	NonVersionExpiration *LifecycleVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
+	// Deprecated: Use NonVersionTransitions instead.
+	NonVersionTransition  *LifecycleVersionTransition  `xml:"-"` // Deprecated: use NonVersionTransitions instead
+	NonVersionTransitions []LifecycleVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+}
+
+// LifecycleExpiration defines the rule's expiration property
+type LifecycleExpiration struct {
+	XMLName                   xml.Name `xml:"Expiration"`
+	Days                      int      `xml:"Days,omitempty"`              // Relative expiration time: the expiration time in days after the last modified time
+	Date                      string   `xml:"Date,omitempty"`              // Absolute expiration time: the expiration date; not recommended
+	CreatedBeforeDate         string   `xml:"CreatedBeforeDate,omitempty"` // Objects created before this date will be expired
+	ExpiredObjectDeleteMarker *bool    `xml:"ExpiredObjectDeleteMarker,omitempty"` // Specifies whether an expired delete marker is automatically removed
+}
+
+// LifecycleTransition defines the rule's transition property
+type LifecycleTransition struct {
+	XMLName           xml.Name         `xml:"Transition"`
+	Days              int              `xml:"Days,omitempty"`              // Relative transition time: the transition time in days after the last modified time
+	CreatedBeforeDate string           `xml:"CreatedBeforeDate,omitempty"` // Objects created before this date will be transitioned
+	StorageClass      StorageClassType `xml:"StorageClass,omitempty"`      // Specifies the target storage type
+}
+
+// LifecycleAbortMultipartUpload defines the rule's abort multipart upload property
+type LifecycleAbortMultipartUpload struct {
+	XMLName           xml.Name `xml:"AbortMultipartUpload"`
+	Days              int      `xml:"Days,omitempty"`              // Relative expiration time: the expiration time in days after the last modified time
+	CreatedBeforeDate string   `xml:"CreatedBeforeDate,omitempty"` // Objects created before this date will be expired
+}
+
+// LifecycleVersionExpiration defines the rule's NoncurrentVersionExpiration property
+type LifecycleVersionExpiration struct {
+	XMLName        xml.Name `xml:"NoncurrentVersionExpiration"`
+	NoncurrentDays int      `xml:"NoncurrentDays,omitempty"` // How many days after the object becomes a non-current version
+}
+
+// LifecycleVersionTransition defines the rule's NoncurrentVersionTransition property
+type LifecycleVersionTransition struct {
+	XMLName        xml.Name         `xml:"NoncurrentVersionTransition"`
+	NoncurrentDays int              `xml:"NoncurrentDays,omitempty"` // How many days after the object becomes a non-current version
+	StorageClass   StorageClassType `xml:"StorageClass,omitempty"`
+}
+
+const iso8601DateFormat = "2006-01-02T15:04:05.000Z"
+const iso8601DateFormatSecond = "2006-01-02T15:04:05Z"
+
+// BuildLifecycleRuleByDays builds a lifecycle rule that expires objects a given number of days after their last modified time
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: &LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate builds a lifecycle rule that expires objects on the specified date
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat)
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: &LifecycleExpiration{Date: date}}
+}
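+
+// Illustrative sketch, not part of the SDK: the two builders above produce
+// ready-to-use rules, which can then be applied with the client's
+// SetBucketLifecycle call. The bucket name and rule IDs are hypothetical.
+//
+//	rule1 := BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30)
+//	rule2 := BuildLifecycleRuleByDate("expire-tmp", "tmp/", true, 2023, 1, 1)
+//	err := client.SetBucketLifecycle("my-bucket", []LifecycleRule{rule1, rule2})
+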
+// verifyLifecycleRules checks whether the given lifecycle rules are valid; it returns an error if any rule is invalid
+func verifyLifecycleRules(rules []LifecycleRule) error {
+	if len(rules) == 0 {
+		return fmt.Errorf("invalid rules, the length of rules is zero")
+	}
+	for k, rule := range rules {
+		if rule.Status != "Enabled" && rule.Status != "Disabled" {
+			return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled")
+		}
+
+		abortMPU := rule.AbortMultipartUpload
+		if abortMPU != nil {
+			if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") {
+				return fmt.Errorf("invalid abort multipart upload lifecycle, exactly one of CreatedBeforeDate and Days must be set")
+			}
+		}
+
+		transitions := rule.Transitions
+		if len(transitions) > 0 {
+			for _, transition := range transitions {
+				if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") {
+					return fmt.Errorf("invalid transition lifecycle, exactly one of CreatedBeforeDate and Days must be set")
+				}
+			}
+		}
+
+		// NonVersionTransition is deprecated; fold it into NonVersionTransitions to stay compatible
+		if rule.NonVersionTransition != nil && len(rule.NonVersionTransitions) > 0 {
+			return fmt.Errorf("NonVersionTransition and NonVersionTransitions cannot both have values")
+		} else if rule.NonVersionTransition != nil {
+			rules[k].NonVersionTransitions = append(rules[k].NonVersionTransitions, *rule.NonVersionTransition)
+		}
+	}
+
+	return nil
+}
+
+// GetBucketLifecycleResult defines GetBucketLifecycle's result object
+type GetBucketLifecycleResult LifecycleConfiguration
+
+// RefererXML defines the Referer configuration
+type RefererXML struct {
+	XMLName           xml.Name `xml:"RefererConfiguration"`
+	AllowEmptyReferer bool     `xml:"AllowEmptyReferer"`   // Allow empty referrer
+	RefererList       []string `xml:"RefererList>Referer"` // Referer whitelist
+}
+
+// GetBucketRefererResult defines the result object for the GetBucketReferer request
+type GetBucketRefererResult RefererXML
+
+// LoggingXML defines the logging configuration
+type LoggingXML struct {
+	XMLName        xml.Name       `xml:"BucketLoggingStatus"`
+	LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information
+}
+
+type loggingXMLEmpty struct {
+	XMLName xml.Name `xml:"BucketLoggingStatus"`
+}
+
+// LoggingEnabled defines the logging configuration information
+type LoggingEnabled struct {
+	XMLName      xml.Name `xml:"LoggingEnabled"`
+	TargetBucket string   `xml:"TargetBucket"` // The bucket name for storing the log files
+	TargetPrefix string   `xml:"TargetPrefix"` // The log file prefix
+}
+
+// GetBucketLoggingResult defines the result from the GetBucketLogging request
+type GetBucketLoggingResult LoggingXML
+
+// WebsiteXML defines the website configuration
+type WebsiteXML struct {
+	XMLName       xml.Name      `xml:"WebsiteConfiguration"`
+	IndexDocument IndexDocument `xml:"IndexDocument,omitempty"`            // The index page
+	ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"`            // The error page
+	RoutingRules  []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` // The routing rule list
+}
+
+// IndexDocument defines the index page info
+type IndexDocument struct {
+	XMLName xml.Name `xml:"IndexDocument"`
+	Suffix  string   `xml:"Suffix"` // The file name for the index page
+}
+
+// ErrorDocument defines the 404 error page info
+type ErrorDocument struct {
+	XMLName xml.Name `xml:"ErrorDocument"`
+	Key     string   `xml:"Key"` // 404 error file name
+}
+
+// RoutingRule defines the routing rules
+type RoutingRule struct {
+	XMLName    xml.Name  `xml:"RoutingRule"`
+	RuleNumber int       `xml:"RuleNumber,omitempty"` // The routing number
+	Condition  Condition `xml:"Condition,omitempty"`  // The routing condition
+	Redirect   Redirect  `xml:"Redirect,omitempty"`   // The routing redirect
+}
+
+// Condition defines the condition in the RoutingRule
+type Condition struct {
+	XMLName                     xml.Name        `xml:"Condition"`
+	KeyPrefixEquals             string          `xml:"KeyPrefixEquals,omitempty"`             // Matching object prefix
+	HTTPErrorCodeReturnedEquals int             `xml:"HttpErrorCodeReturnedEquals,omitempty"` // Matches the HTTP error code returned when the specified object is accessed
+	IncludeHeader               []IncludeHeader `xml:"IncludeHeader"`                         // Matches requests that include the given headers
+}
+
+// IncludeHeader defines includeHeader in the RoutingRule's Condition
+type IncludeHeader struct {
+	XMLName xml.Name `xml:"IncludeHeader"`
+	Key     string   `xml:"Key,omitempty"`    // The include header key
+	Equals  string   `xml:"Equals,omitempty"` // The include header value
+}
+
+// Redirect defines redirect in the RoutingRule
+type Redirect struct {
+	XMLName               xml.Name      `xml:"Redirect"`
+	RedirectType          string        `xml:"RedirectType,omitempty"`  // The redirect type; valid values are Mirror, External, Internal and AliCDN
+	PassQueryString       *bool         `xml:"PassQueryString"`         // Whether to pass the request's query string along, true or false
+	MirrorURL             string        `xml:"MirrorURL,omitempty"`     // The mirror-based back-to-origin URL
+	MirrorPassQueryString *bool         `xml:"MirrorPassQueryString"`   // Whether to pass the request's query string to the mirror source, true or false
+	MirrorFollowRedirect  *bool         `xml:"MirrorFollowRedirect"`    // Whether to follow the redirect when the mirror source returns a 3xx response
+	MirrorCheckMd5        *bool         `xml:"MirrorCheckMd5"`          // Whether to check the MD5 of the body returned by the mirror source
+	MirrorHeaders         MirrorHeaders `xml:"MirrorHeaders,omitempty"` // Mirror headers
+	Protocol              string        `xml:"Protocol,omitempty"`      // The redirect protocol
+	HostName              string        `xml:"HostName,omitempty"`      // The redirect host name
+	ReplaceKeyPrefixWith  string        `xml:"ReplaceKeyPrefixWith,omitempty"` // The value that replaces the matched object name prefix
+	HttpRedirectCode      int           `xml:"HttpRedirectCode,omitempty"`     // The redirect HTTP code
+	ReplaceKeyWith        string        `xml:"ReplaceKeyWith,omitempty"`       // The value that replaces the object name
+}
+
+// MirrorHeaders defines MirrorHeaders in the Redirect
+type MirrorHeaders struct {
+	XMLName xml.Name          `xml:"MirrorHeaders"`
+	PassAll *bool             `xml:"PassAll"` // Pass all request headers through to the source website
+	Pass    []string          `xml:"Pass"`    // Pass these request headers through to the source website
+	Remove  []string          `xml:"Remove"`  // Prohibit passing these headers through to the source website
+	Set     []MirrorHeaderSet `xml:"Set"`     // Headers to set on requests sent to the source website
+}
+
+// MirrorHeaderSet defines Set for Redirect's MirrorHeaders
+type MirrorHeaderSet struct {
+	XMLName xml.Name `xml:"Set"`
+	Key     string   `xml:"Key,omitempty"`   // The mirror header key
+	Value   string   `xml:"Value,omitempty"` // The mirror header value
+}
+
+// GetBucketWebsiteResult defines the result from the GetBucketWebsite request.
+type GetBucketWebsiteResult WebsiteXML
+
+// CORSXML defines CORS configuration
+type CORSXML struct {
+	XMLName   xml.Name   `xml:"CORSConfiguration"`
+	CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
+}
+
+// CORSRule defines CORS rules
+type CORSRule struct {
+	XMLName       xml.Name `xml:"CORSRule"`
+	AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins.
By default it's wildcard '*' + AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods + AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers + ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers + MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds +} + +// GetBucketCORSResult defines the result from GetBucketCORS request. +type GetBucketCORSResult CORSXML + +// GetBucketInfoResult defines the result from GetBucketInfo request. +type GetBucketInfoResult struct { + XMLName xml.Name `xml:"BucketInfo"` + BucketInfo BucketInfo `xml:"Bucket"` +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket creation time + ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint + IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + RedundancyType string `xml:"DataRedundancyType"` // Bucket DataRedundancyType + Owner Owner `xml:"Owner"` // Bucket owner + StorageClass string `xml:"StorageClass"` // Bucket storage class + SseRule SSERule `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + Versioning string `xml:"Versioning"` // Bucket Versioning + TransferAcceleration string `xml:"TransferAcceleration"` // bucket TransferAcceleration + CrossRegionReplication string `xml:"CrossRegionReplication"` // bucket CrossRegionReplication +} + +type SSERule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` // Bucket KMSMasterKeyID + SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"` // Bucket SSEAlgorithm + KMSDataEncryption string `xml:"KMSDataEncryption,omitempty"` //Bucket KMSDataEncryption +} + +// ListObjectsResult defines the result from ListObjects request +type ListObjectsResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + Marker string `xml:"Marker"` // The marker filter. 
+	MaxKeys        int                `xml:"MaxKeys"`               // Max keys to return
+	Delimiter      string             `xml:"Delimiter"`             // The delimiter for grouping objects' names
+	IsTruncated    bool               `xml:"IsTruncated"`           // Flag indicates if all results are returned (when it's false)
+	NextMarker     string             `xml:"NextMarker"`            // The start point of the next query
+	Objects        []ObjectProperties `xml:"Contents"`              // Object list
+	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // You can think of common prefixes as "folders" whose names end with the delimiter
+}
+
+// ObjectProperties defines object properties
+type ObjectProperties struct {
+	XMLName      xml.Name  `xml:"Contents"`
+	Key          string    `xml:"Key"`          // Object key
+	Type         string    `xml:"Type"`         // Object type
+	Size         int64     `xml:"Size"`         // Object size
+	ETag         string    `xml:"ETag"`         // Object ETag
+	Owner        Owner     `xml:"Owner"`        // Object owner information
+	LastModified time.Time `xml:"LastModified"` // Object last modified time
+	StorageClass string    `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
+}
+
+// ListObjectsResultV2 defines the result from the ListObjectsV2 request
+type ListObjectsResultV2 struct {
+	XMLName               xml.Name           `xml:"ListBucketResult"`
+	Prefix                string             `xml:"Prefix"`                // The object prefix
+	StartAfter            string             `xml:"StartAfter"`            // The input StartAfter
+	ContinuationToken     string             `xml:"ContinuationToken"`     // The input ContinuationToken
+	MaxKeys               int                `xml:"MaxKeys"`               // Max keys to return
+	Delimiter             string             `xml:"Delimiter"`             // The delimiter for grouping objects' names
+	IsTruncated           bool               `xml:"IsTruncated"`           // Flag indicates if all results are returned (when it's false)
+	NextContinuationToken string             `xml:"NextContinuationToken"` // The token from which the next list call should start
+	Objects               []ObjectProperties `xml:"Contents"`              // Object list
+	CommonPrefixes        []string           `xml:"CommonPrefixes>Prefix"` // You can think of common prefixes as "folders" whose names end with the delimiter
+}
+
+// ListObjectVersionsResult defines the result from the ListObjectVersions request
+type ListObjectVersionsResult struct {
+	XMLName         xml.Name `xml:"ListVersionsResult"`
+	Name            string   `xml:"Name"`            // The bucket name
+	Owner           Owner    `xml:"Owner"`           // The owner of the bucket
+	Prefix          string   `xml:"Prefix"`          // The object prefix
+	KeyMarker       string   `xml:"KeyMarker"`       // The start marker filter.
+	VersionIdMarker string   `xml:"VersionIdMarker"` // The start VersionIdMarker filter.
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextKeyMarker string `xml:"NextKeyMarker"` // The start point of the next query + NextVersionIdMarker string `xml:"NextVersionIdMarker"` // The start point of the next query + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` // DeleteMarker list + ObjectVersions []ObjectVersionProperties `xml:"Version"` // version list +} + +type ObjectDeleteMarkerProperties struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is current version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Owner Owner `xml:"Owner"` // bucket owner element +} + +type ObjectVersionProperties struct { + XMLName xml.Name `xml:"Version"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is latest version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) + Owner Owner `xml:"Owner"` // bucket owner element +} + +// Owner defines Bucket/Object's owner +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` // Owner ID + DisplayName string `xml:"DisplayName"` // Owner's display name +} + +// CopyObjectResult defines result object of CopyObject +type CopyObjectResult struct { + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` // New object's last modified time. + ETag string `xml:"ETag"` // New object's ETag +} + +// GetObjectACLResult defines result of GetObjectACL request +type GetObjectACLResult GetBucketACLResult + +type deleteXML struct { + XMLName xml.Name `xml:"Delete"` + Objects []DeleteObject `xml:"Object"` // Objects to delete + Quiet bool `xml:"Quiet"` // Flag of quiet mode. 
+}
+
+// DeleteObject defines the struct for deleting an object
+type DeleteObject struct {
+	XMLName   xml.Name `xml:"Object"`
+	Key       string   `xml:"Key"`                 // Object name
+	VersionId string   `xml:"VersionId,omitempty"` // Object VersionId
+}
+
+// DeleteObjectsResult defines the result of the DeleteObjects request
+type DeleteObjectsResult struct {
+	XMLName        xml.Name
+	DeletedObjects []string // Deleted object key list
+}
+
+// DeleteObjectVersionsResult defines the version-aware result of the DeleteObjects request
+type DeleteObjectVersionsResult struct {
+	XMLName              xml.Name         `xml:"DeleteResult"`
+	DeletedObjectsDetail []DeletedKeyInfo `xml:"Deleted"` // Deleted object detail info
+}
+
+// DeletedKeyInfo defines object delete info
+type DeletedKeyInfo struct {
+	XMLName               xml.Name `xml:"Deleted"`
+	Key                   string   `xml:"Key"`                   // Object key
+	VersionId             string   `xml:"VersionId"`             // VersionId
+	DeleteMarker          bool     `xml:"DeleteMarker"`          // Object DeleteMarker
+	DeleteMarkerVersionId string   `xml:"DeleteMarkerVersionId"` // Object DeleteMarkerVersionId
+}
+
+// InitiateMultipartUploadResult defines the result of the InitiateMultipartUpload request
+type InitiateMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	Key      string   `xml:"Key"`      // Object name to upload
+	UploadID string   `xml:"UploadId"` // Generated UploadId
+}
+
+// UploadPart defines the upload/copy part
+type UploadPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	PartNumber int      `xml:"PartNumber"` // Part number
+	ETag       string   `xml:"ETag"`       // ETag value of the part's data
+}
+
+// UploadParts implements sort.Interface so parts can be ordered by part number
+type UploadParts []UploadPart
+
+func (slice UploadParts) Len() int {
+	return len(slice)
+}
+
+func (slice UploadParts) Less(i, j int) bool {
+	return slice[i].PartNumber < slice[j].PartNumber
+}
+
+func (slice UploadParts) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
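+
+// Illustrative sketch, not part of the SDK: because UploadParts satisfies
+// sort.Interface, a caller assembling a CompleteMultipartUpload request can
+// order the parts by part number with the standard library. The parts slice
+// below is hypothetical.
+//
+//	parts := UploadParts{{PartNumber: 3, ETag: "c"}, {PartNumber: 1, ETag: "a"}}
+//	sort.Sort(parts) // parts is now ordered 1, 3
+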
+// UploadPartCopyResult defines the result object of the multipart copy request.
+type UploadPartCopyResult struct {
+	XMLName      xml.Name  `xml:"CopyPartResult"`
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag
+}
+
+type completeMultipartUploadXML struct {
+	XMLName xml.Name     `xml:"CompleteMultipartUpload"`
+	Part    []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult defines the result object of CompleteMultipartUpload
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string   `xml:"Location"` // Object URL
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	ETag     string   `xml:"ETag"`     // Object ETag
+	Key      string   `xml:"Key"`      // Object name
+}
+
+// ListUploadedPartsResult defines the result object of ListUploadedParts
+type ListUploadedPartsResult struct {
+	XMLName              xml.Name       `xml:"ListPartsResult"`
+	Bucket               string         `xml:"Bucket"`               // Bucket name
+	Key                  string         `xml:"Key"`                  // Object name
+	UploadID             string         `xml:"UploadId"`             // Upload ID
+	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // Next part number
+	MaxParts             int            `xml:"MaxParts"`             // Max parts count
+	IsTruncated          bool           `xml:"IsTruncated"`          // Flag indicates whether the results were truncated; false means all entries have been returned
+	UploadedParts        []UploadedPart `xml:"Part"`                 // Uploaded parts
+}
+
+// UploadedPart defines an uploaded part
+type UploadedPart struct {
+	XMLName      xml.Name  `xml:"Part"`
+	PartNumber   int       `xml:"PartNumber"`   // Part number
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag cache
+	Size         int       `xml:"Size"`         // Part size
+}
+
+// ListMultipartUploadResult defines the result object of ListMultipartUpload
+type ListMultipartUploadResult struct {
+	XMLName            xml.Name            `xml:"ListMultipartUploadsResult"`
+	Bucket             string              `xml:"Bucket"`                // Bucket name
+	Delimiter          string              `xml:"Delimiter"`             // Delimiter for grouping objects
+	Prefix             string              `xml:"Prefix"`                // Object prefix
+	KeyMarker          string              `xml:"KeyMarker"`             // Object key marker
+	UploadIDMarker     string              `xml:"UploadIdMarker"`        // UploadId marker
+	NextKeyMarker      string              `xml:"NextKeyMarker"`         // Next key marker, if not all entries returned
+	NextUploadIDMarker string              `xml:"NextUploadIdMarker"`    // Next uploadId marker, if not all entries returned
+	MaxUploads         int                 `xml:"MaxUploads"`            // Max uploads to return
+	IsTruncated        bool                `xml:"IsTruncated"`           // Flag indicates whether all entries are returned
+	Uploads            []UncompletedUpload `xml:"Upload"`                // Ongoing uploads (not completed, not aborted)
+	CommonPrefixes     []string            `xml:"CommonPrefixes>Prefix"` // Common prefixes list
+}
+
+// UncompletedUpload structure wraps an uncompleted upload task
+type UncompletedUpload struct {
+	XMLName   xml.Name  `xml:"Upload"`
+	Key       string    `xml:"Key"`       // Object name
+	UploadID  string    `xml:"UploadId"`  // The UploadId
+	Initiated time.Time `xml:"Initiated"` // Initialization time in a format such as 2012-02-23T04:18:23.000Z
+}
+
+// ProcessObjectResult defines the result object of ProcessObject
+type ProcessObjectResult struct {
+	Bucket   string `json:"bucket"`
+	FileSize int    `json:"fileSize"`
+	Object   string `json:"object"`
+	Status   string `json:"status"`
+}
+
+// decodeDeleteObjectsResult decodes the delete objects result from URL encoding
+func decodeDeleteObjectsResult(result *DeleteObjectVersionsResult) error {
+	var err error
+	for i := 0; i < len(result.DeletedObjectsDetail); i++ {
+		result.DeletedObjectsDetail[i].Key, err = url.QueryUnescape(result.DeletedObjectsDetail[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeListObjectsResult decodes the list objects result from URL encoding
+func decodeListObjectsResult(result *ListObjectsResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Marker, err = url.QueryUnescape(result.Marker)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.NextMarker, err = url.QueryUnescape(result.NextMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Objects); i++ {
+		result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeListObjectsResultV2 decodes the list objects v2 result from URL encoding
+func decodeListObjectsResultV2(result *ListObjectsResultV2) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.StartAfter, err = url.QueryUnescape(result.StartAfter)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err =
url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextContinuationToken, err = url.QueryUnescape(result.NextContinuationToken) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectVersionsResult decodes list version objects result in URL encoding +func decodeListObjectVersionsResult(result *ListObjectVersionsResult) error { + var err error + + // decode:Delimiter + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + + // decode Prefix + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + + // decode KeyMarker + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + + // decode VersionIdMarker + result.VersionIdMarker, err = url.QueryUnescape(result.VersionIdMarker) + if err != nil { + return err + } + + // decode NextKeyMarker + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + + // decode NextVersionIdMarker + result.NextVersionIdMarker, err = url.QueryUnescape(result.NextVersionIdMarker) + if err != nil { + return err + } + + // decode CommonPrefixes + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + + // decode deleteMarker + for i := 0; i < len(result.ObjectDeleteMarkers); i++ { + result.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(result.ObjectDeleteMarkers[i].Key) + if err != nil { + return err + } + } + + // decode ObjectVersions + for i := 0; i < len(result.ObjectVersions); i++ { + result.ObjectVersions[i].Key, err = url.QueryUnescape(result.ObjectVersions[i].Key) + if err != nil { + return err + } + } + + return nil +} + +// decodeListUploadedPartsResult decodes +func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error { + var err error + result.Key, err = url.QueryUnescape(result.Key) + if err != nil { + return err + } + return nil +} + +// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding +func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Uploads); i++ { + result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// createBucketConfiguration defines the configuration for creating a bucket. 
+type createBucketConfiguration struct {
+	XMLName            xml.Name           `xml:"CreateBucketConfiguration"`
+	StorageClass       StorageClassType   `xml:"StorageClass,omitempty"`
+	DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"`
+	ObjectHashFunction ObjecthashFuncType `xml:"ObjectHashFunction,omitempty"`
+}
+
+// LiveChannelConfiguration defines the configuration for a live channel
+type LiveChannelConfiguration struct {
+	XMLName     xml.Name          `xml:"LiveChannelConfiguration"`
+	Description string            `xml:"Description,omitempty"` // Description of the live channel, up to 128 bytes
+	Status      string            `xml:"Status,omitempty"`      // Specify the status of the live channel
+	Target      LiveChannelTarget `xml:"Target"`                // Target configuration of the live channel
+	// use a pointer instead of a plain struct so an empty Snapshot is omitted
+	Snapshot *LiveChannelSnapshot `xml:"Snapshot,omitempty"` // Snapshot configuration of the live channel
+}
+
+// LiveChannelTarget is the target configuration of a live channel
+type LiveChannelTarget struct {
+	XMLName      xml.Name `xml:"Target"`
+	Type         string   `xml:"Type"`                   // The type of object, only HLS is supported
+	FragDuration int      `xml:"FragDuration,omitempty"` // The length of each ts object (in seconds), in the range [1,100]
+	FragCount    int      `xml:"FragCount,omitempty"`    // The number of ts objects in the m3u8 object, in the range [1,100]
+	PlaylistName string   `xml:"PlaylistName,omitempty"` // The name of the m3u8 object, which must end with ".m3u8"; its length range is [6,128]
+}
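+
+// Illustrative sketch, not part of the SDK: a minimal HLS live channel
+// configuration; CreateLiveChannel (defined in livechannel.go) accepts a value
+// like this. The channel name and playlist name are hypothetical.
+//
+//	config := LiveChannelConfiguration{
+//		Status: "enabled",
+//		Target: LiveChannelTarget{
+//			Type:         "HLS",
+//			FragDuration: 5,
+//			FragCount:    3,
+//			PlaylistName: "playlist.m3u8",
+//		},
+//	}
+//	result, err := bucket.CreateLiveChannel("my-channel", config)
+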
+// LiveChannelSnapshot is the snapshot configuration of a live channel
+type LiveChannelSnapshot struct {
+	XMLName     xml.Name `xml:"Snapshot"`
+	RoleName    string   `xml:"RoleName,omitempty"`    // The role used for snapshot operations; it should have write permission on DestBucket and permission to send messages to NotifyTopic
+	DestBucket  string   `xml:"DestBucket,omitempty"`  // Bucket the snapshots will be written to; it should have the same owner as the source bucket
+	NotifyTopic string   `xml:"NotifyTopic,omitempty"` // MNS topic for notifying users of the results of high-frequency snapshot operations
+	Interval    int      `xml:"Interval,omitempty"`    // Interval of snapshots; there is no snapshot if there is no I-frame during the interval
+}
+
+// CreateLiveChannelResult is the result of creating a live channel
+type CreateLiveChannelResult struct {
+	XMLName     xml.Name `xml:"CreateLiveChannelResult"`
+	PublishUrls []string `xml:"PublishUrls>Url"` // Push URL list
+	PlayUrls    []string `xml:"PlayUrls>Url"`    // Play URL list
+}
+
+// LiveChannelStat is the result of getting the live channel state
+type LiveChannelStat struct {
+	XMLName       xml.Name         `xml:"LiveChannelStat"`
+	Status        string           `xml:"Status"`        // Current push status of the live channel: Disabled, Live, Idle
+	ConnectedTime time.Time        `xml:"ConnectedTime"` // The time when the client starts pushing, format: ISO8601
+	RemoteAddr    string           `xml:"RemoteAddr"`    // The IP address of the client
+	Video         LiveChannelVideo `xml:"Video"`         // Video stream information
+	Audio         LiveChannelAudio `xml:"Audio"`         // Audio stream information
+}
+
+// LiveChannelVideo is the video stream information
+type LiveChannelVideo struct {
+	XMLName   xml.Name `xml:"Video"`
+	Width     int      `xml:"Width"`     // Width (unit: pixels)
+	Height    int      `xml:"Height"`    // Height (unit: pixels)
+	FrameRate int      `xml:"FrameRate"` // Frame rate
+	Bandwidth int      `xml:"Bandwidth"` // Bandwidth (unit: B/s)
+}
+
+// LiveChannelAudio is the audio stream information
+type LiveChannelAudio struct {
+	XMLName    xml.Name `xml:"Audio"`
+	SampleRate int      `xml:"SampleRate"` // Sample rate
+	Bandwidth  int      `xml:"Bandwidth"`  // Bandwidth (unit: B/s)
+	Codec      string   `xml:"Codec"`      // Encoding format
+}
+
+// LiveChannelHistory is the result of GetLiveChannelHistory; it returns at most the latest 10 push records
+type LiveChannelHistory struct {
+	XMLName xml.Name     `xml:"LiveChannelHistory"`
+	Record  []LiveRecord `xml:"LiveRecord"` // Push record list
+}
+
+// LiveRecord is a push record
+type LiveRecord struct {
+	XMLName    xml.Name  `xml:"LiveRecord"`
+	StartTime  time.Time `xml:"StartTime"`  // Start time, format: ISO8601
+	EndTime    time.Time `xml:"EndTime"`    // End time, format: ISO8601
+	RemoteAddr string    `xml:"RemoteAddr"` // The IP address of the remote client
+}
+
+// ListLiveChannelResult is the result of ListLiveChannel
+type ListLiveChannelResult struct {
+	XMLName xml.Name `xml:"ListLiveChannelResult"`
+	Prefix  string   `xml:"Prefix"`  // Filter by names that start with the value of "Prefix"
+	Marker  string   `xml:"Marker"`  // The cursor from which listing starts
+	MaxKeys int      `xml:"MaxKeys"` // The maximum count returned; the default value is 100, and it cannot be greater than 1000
+	IsTruncated bool              `xml:"IsTruncated"` // Whether all results have been returned: "true" means partial results were returned, "false" means all results have been returned
+	NextMarker  string            `xml:"NextMarker"`  // The Marker value for the next request
+	LiveChannel []LiveChannelInfo `xml:"LiveChannel"` // The information of the live channels
+}
+
+// LiveChannelInfo is the information of a live channel
+type LiveChannelInfo struct {
+	XMLName      xml.Name  `xml:"LiveChannel"`
+	Name         string    `xml:"Name"`            // The name of the live channel
+	Description  string    `xml:"Description"`     // Description of the live channel
+	Status       string    `xml:"Status"`          // Status: disabled or enabled
+	LastModified time.Time `xml:"LastModified"`    // Last modification time, format: ISO8601
+	PublishUrls  []string  `xml:"PublishUrls>Url"` // Push URL list
+	PlayUrls     []string  `xml:"PlayUrls>Url"`    // Play URL list
+}
+
+// Tag is a tag for the object
+type Tag struct {
+	XMLName xml.Name `xml:"Tag"`
+	Key     string   `xml:"Key"`
+	Value   string   `xml:"Value"`
+}
+
+// Tagging is the tag set for the object
+type Tagging struct {
+	XMLName xml.Name `xml:"Tagging"`
+	Tags    []Tag    `xml:"TagSet>Tag,omitempty"`
+}
+
+// GetObjectTaggingResult is the return value of GetObjectTagging
+type GetObjectTaggingResult Tagging
+
+// VersioningConfig for the bucket
+type VersioningConfig struct {
+	XMLName xml.Name `xml:"VersioningConfiguration"`
+	Status  string   `xml:"Status"`
+}
+
+type GetBucketVersioningResult VersioningConfig
+
+// ServerEncryptionRule is the server-side encryption rule for the bucket
+type ServerEncryptionRule struct {
+	XMLName    xml.Name       `xml:"ServerSideEncryptionRule"`
+	SSEDefault SSEDefaultRule `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// SSEDefaultRule is the server-side encryption default rule for the bucket
+type SSEDefaultRule struct {
+	XMLName           xml.Name `xml:"ApplyServerSideEncryptionByDefault"`
+	SSEAlgorithm      string   `xml:"SSEAlgorithm,omitempty"`
+	KMSMasterKeyID    string   `xml:"KMSMasterKeyID,omitempty"`
+	KMSDataEncryption string   `xml:"KMSDataEncryption,omitempty"`
+}
+
+type GetBucketEncryptionResult ServerEncryptionRule
+type GetBucketTaggingResult Tagging
+
+type BucketStat struct {
+	XMLName              xml.Name `xml:"BucketStat"`
+	Storage              int64    `xml:"Storage"`
+	ObjectCount          int64    `xml:"ObjectCount"`
+	MultipartUploadCount int64    `xml:"MultipartUploadCount"`
+}
+type GetBucketStatResult BucketStat
+
+// RequestPaymentConfiguration defines the request payment configuration
+type RequestPaymentConfiguration struct {
+	XMLName xml.Name `xml:"RequestPaymentConfiguration"`
+	Payer   string   `xml:"Payer,omitempty"`
+}
+
+// BucketQoSConfiguration defines the QoS configuration
+type BucketQoSConfiguration struct {
+	XMLName                   xml.Name `xml:"QoSConfiguration"`
+	TotalUploadBandwidth      *int     `xml:"TotalUploadBandwidth"`      // Total upload bandwidth
+	IntranetUploadBandwidth   *int     `xml:"IntranetUploadBandwidth"`   // Intranet upload bandwidth
+	ExtranetUploadBandwidth   *int     `xml:"ExtranetUploadBandwidth"`   // Extranet upload bandwidth
+	TotalDownloadBandwidth    *int     `xml:"TotalDownloadBandwidth"`    // Total download bandwidth
+	IntranetDownloadBandwidth *int     `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth
+	ExtranetDownloadBandwidth *int     `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth
+	TotalQPS                  *int     `xml:"TotalQps"`                  // Total QPS
+	IntranetQPS               *int     `xml:"IntranetQps"`               // Intranet QPS
+	ExtranetQPS               *int     `xml:"ExtranetQps"`               // Extranet QPS
+}
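+
+// Illustrative sketch, not part of the SDK: the bandwidth and QPS fields are
+// pointers so that "unset" can be distinguished from zero, which means a
+// caller needs an addressable int for each limit it wants to set. The local
+// helper and values below are hypothetical; units are as defined by the service.
+//
+//	intPtr := func(v int) *int { return &v }
+//	qos := BucketQoSConfiguration{
+//		TotalUploadBandwidth: intPtr(100),
+//		TotalQPS:             intPtr(1000),
+//	}
+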
+// UserQoSConfiguration defines the QoS and range configuration
+type UserQoSConfiguration struct {
+	XMLName xml.Name `xml:"QoSConfiguration"`
+	Region  string   `xml:"Region,omitempty"` // Effective area of the QoS configuration
+	BucketQoSConfiguration
+}
+
+//////////////////////////////////////////////////////////////
+/////////////////// Select Object ////////////////////////////
+//////////////////////////////////////////////////////////////
+
+type CsvMetaRequest struct {
+	XMLName            xml.Name           `xml:"CsvMetaRequest"`
+	InputSerialization InputSerialization `xml:"InputSerialization"`
+	OverwriteIfExists  *bool              `xml:"OverwriteIfExists,omitempty"`
+}
+
+// encodeBase64 base64-encodes the CreateSelectObjectMeta api request parameters
+func (meta *CsvMetaRequest) encodeBase64() {
+	meta.InputSerialization.CSV.RecordDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.RecordDelimiter))
+	meta.InputSerialization.CSV.FieldDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.FieldDelimiter))
+	meta.InputSerialization.CSV.QuoteCharacter =
+		base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.QuoteCharacter))
+}
+
+type JsonMetaRequest struct {
+	XMLName            xml.Name           `xml:"JsonMetaRequest"`
+	InputSerialization InputSerialization `xml:"InputSerialization"`
+	OverwriteIfExists  *bool              `xml:"OverwriteIfExists,omitempty"`
+}
+
+type InputSerialization struct {
+	XMLName         xml.Name `xml:"InputSerialization"`
+	CSV             CSV      `xml:"CSV,omitempty"`
+	JSON            JSON     `xml:"JSON,omitempty"`
+	CompressionType string   `xml:"CompressionType,omitempty"`
+}
+type CSV struct {
+	XMLName         xml.Name `xml:"CSV"`
+	RecordDelimiter string   `xml:"RecordDelimiter,omitempty"`
+	FieldDelimiter  string   `xml:"FieldDelimiter,omitempty"`
+	QuoteCharacter  string   `xml:"QuoteCharacter,omitempty"`
+}
+
+type JSON struct {
+	XMLName  xml.Name `xml:"JSON"`
+	JSONType string   `xml:"Type,omitempty"`
+}
+
+// SelectRequest carries the SelectObject request parameters
+type SelectRequest struct {
+	XMLName                   xml.Name                  `xml:"SelectRequest"`
+	Expression                string                    `xml:"Expression"`
+	InputSerializationSelect  InputSerializationSelect  `xml:"InputSerialization"`
+	OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"`
+	SelectOptions             SelectOptions             `xml:"Options,omitempty"`
+}
+type InputSerializationSelect struct {
+	XMLName         xml.Name        `xml:"InputSerialization"`
+	CsvBodyInput    CSVSelectInput  `xml:"CSV,omitempty"`
+	JsonBodyInput   JSONSelectInput `xml:"JSON,omitempty"`
+	CompressionType string          `xml:"CompressionType,omitempty"`
+}
+type CSVSelectInput struct {
+	XMLName          xml.Name `xml:"CSV"`
+	FileHeaderInfo   string   `xml:"FileHeaderInfo,omitempty"`
+	RecordDelimiter  string   `xml:"RecordDelimiter,omitempty"`
+	FieldDelimiter   string   `xml:"FieldDelimiter,omitempty"`
+	QuoteCharacter   string   `xml:"QuoteCharacter,omitempty"`
+	CommentCharacter string   `xml:"CommentCharacter,omitempty"`
+	Range            string   `xml:"Range,omitempty"`
+	SplitRange       string
+}
+type JSONSelectInput struct {
+	XMLName                 xml.Name `xml:"JSON"`
+	JSONType                string   `xml:"Type,omitempty"`
+	Range                   string   `xml:"Range,omitempty"`
+	ParseJSONNumberAsString *bool    `xml:"ParseJsonNumberAsString"`
+	SplitRange              string
+}
+
+// JsonIsEmpty reports whether the JSON input serialization is unset
+func (jsonInput *JSONSelectInput) JsonIsEmpty() bool {
+	return jsonInput.JSONType == ""
+}
+
+type OutputSerializationSelect struct {
+	XMLName          xml.Name         `xml:"OutputSerialization"`
+	CsvBodyOutput    CSVSelectOutput  `xml:"CSV,omitempty"`
+	JsonBodyOutput   JSONSelectOutput `xml:"JSON,omitempty"`
+	OutputRawData    *bool            `xml:"OutputRawData,omitempty"`
+	KeepAllColumns   *bool            `xml:"KeepAllColumns,omitempty"`
+	EnablePayloadCrc *bool            `xml:"EnablePayloadCrc,omitempty"`
+	OutputHeader     *bool            `xml:"OutputHeader,omitempty"`
+}
+type CSVSelectOutput struct {
+	XMLName         xml.Name `xml:"CSV"`
+	RecordDelimiter string   `xml:"RecordDelimiter,omitempty"`
+	FieldDelimiter  string   `xml:"FieldDelimiter,omitempty"`
+}
+type JSONSelectOutput struct {
+	XMLName         xml.Name `xml:"JSON"`
+	RecordDelimiter string   `xml:"RecordDelimiter,omitempty"`
+}
+
+func (selectReq *SelectRequest) encodeBase64() {
+	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+		selectReq.csvEncodeBase64()
+	} else {
+		selectReq.jsonEncodeBase64()
+	}
+}
+
+// csvEncodeBase64 base64-encodes the SelectObject api request parameters for CSV input
+func (selectReq *SelectRequest) csvEncodeBase64() {
+	selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression))
+	selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter))
+	selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter))
+	selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter))
+	selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter))
+	selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter))
+	selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter))
+
+	// handle Range
+	if selectReq.InputSerializationSelect.CsvBodyInput.Range != "" {
+		selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.CsvBodyInput.Range
+	}
+
+	if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" {
+		selectReq.InputSerializationSelect.CsvBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange
+	}
+}
+
+// jsonEncodeBase64 base64-encodes the SelectObject api request parameters for JSON input
+func (selectReq *SelectRequest) jsonEncodeBase64() {
+	selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression))
+	selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter =
+		base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter))
+
+	// handle Range
+	if selectReq.InputSerializationSelect.JsonBodyInput.Range != "" {
+		selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.JsonBodyInput.Range
+	}
+
+	if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" {
+		selectReq.InputSerializationSelect.JsonBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange
+	}
+}
+
+// SelectOptions is an element in the SelectObject api request's parameters
+type SelectOptions struct {
+	XMLName                  xml.Name `xml:"Options"`
+	SkipPartialDataRecord    *bool    `xml:"SkipPartialDataRecord,omitempty"`
+	MaxSkippedRecordsAllowed string   `xml:"MaxSkippedRecordsAllowed,omitempty"`
+}
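+
+// Illustrative sketch, not part of the SDK: a CSV SelectRequest as it might be
+// assembled before the SDK base64-encodes the expression and delimiters via
+// encodeBase64 above. The SQL text and delimiters are hypothetical.
+//
+//	selectReq := SelectRequest{
+//		Expression: "select Year, StateAbbr from ossobject",
+//		InputSerializationSelect: InputSerializationSelect{
+//			CsvBodyInput: CSVSelectInput{
+//				FileHeaderInfo:  "Use",
+//				RecordDelimiter: "\n",
+//				FieldDelimiter:  ",",
+//			},
+//		},
+//	}
+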
+// SelectObjectResult is the result of the SelectObject api
+type SelectObjectResult struct {
+	Version          byte
+	FrameType        int32
+	PayloadLength    int32
+	HeaderCheckSum   uint32
+	Offset           uint64
+	Data             string           // DataFrame
+	EndFrame         EndFrame         // EndFrame
+	MetaEndFrameCSV  MetaEndFrameCSV  // MetaEndFrameCSV
+	MetaEndFrameJSON MetaEndFrameJSON // MetaEndFrameJSON
+	PayloadChecksum  uint32
+	ReadFlagInfo
+}
+
+// ReadFlagInfo records the reading status while frame data is being read
+type ReadFlagInfo struct {
+	OpenLine            bool
+	ConsumedBytesLength int32
+	EnablePayloadCrc    bool
+	OutputRawData       bool
+}
+
+// EndFrame is the EndFrameType payload of the SelectObject api
+type EndFrame struct {
+	TotalScanned   int64
+	HTTPStatusCode int32
+	ErrorMsg       string
+}
+
+// MetaEndFrameCSV is the MetaEndFrameCSVType payload of CreateSelectObjectMeta
+type MetaEndFrameCSV struct {
+	TotalScanned int64
+	Status       int32
+	SplitsCount  int32
+	RowsCount    int64
+	ColumnsCount int32
+	ErrorMsg     string
+}
+
+// MetaEndFrameJSON is the MetaEndFrameJSONType payload of CreateSelectObjectMeta
+type MetaEndFrameJSON struct {
+	TotalScanned int64
+	Status       int32
+	SplitsCount  int32
+	RowsCount    int64
+	ErrorMsg     string
+}
+
+// InventoryConfiguration is the inventory configuration
+type InventoryConfiguration struct {
+	XMLName                xml.Name             `xml:"InventoryConfiguration"`
+	Id                     string               `xml:"Id,omitempty"`
+	IsEnabled              *bool                `xml:"IsEnabled,omitempty"`
+	Prefix                 string               `xml:"Filter>Prefix,omitempty"`
+	OSSBucketDestination   OSSBucketDestination `xml:"Destination>OSSBucketDestination,omitempty"`
+	Frequency              string               `xml:"Schedule>Frequency,omitempty"`
+	IncludedObjectVersions string               `xml:"IncludedObjectVersions,omitempty"`
+	OptionalFields         OptionalFields       `xml:"OptionalFields,omitempty"`
+}
+
+type OptionalFields struct {
+	XMLName xml.Name `xml:"OptionalFields,omitempty"`
+	Field   []string `xml:"Field,omitempty"`
+}
+
+type OSSBucketDestination struct {
+	XMLName    xml.Name       `xml:"OSSBucketDestination"`
+	Format     string         `xml:"Format,omitempty"`
+	AccountId  string         `xml:"AccountId,omitempty"`
+	RoleArn    string         `xml:"RoleArn,omitempty"`
+	Bucket     string         `xml:"Bucket,omitempty"`
+	Prefix     string         `xml:"Prefix,omitempty"`
+	Encryption *InvEncryption `xml:"Encryption,omitempty"`
+}
+
+type InvEncryption struct {
+	XMLName xml.Name   `xml:"Encryption"`
+	SseOss  *InvSseOss `xml:"SSE-OSS"`
+	SseKms  *InvSseKms `xml:"SSE-KMS"`
+}
+
+type InvSseOss struct {
+	XMLName xml.Name `xml:"SSE-OSS"`
+}
+
+type InvSseKms struct {
+	XMLName xml.Name `xml:"SSE-KMS"`
+	KmsId   string   `xml:"KeyId,omitempty"`
+}
+
+type ListInventoryConfigurationsResult struct {
+	XMLName                xml.Name                 `xml:"ListInventoryConfigurationsResult"`
+	InventoryConfiguration []InventoryConfiguration `xml:"InventoryConfiguration,omitempty"`
+	IsTruncated            *bool                    `xml:"IsTruncated,omitempty"`
+	NextContinuationToken  string                   `xml:"NextContinuationToken,omitempty"`
+}
+
+// RestoreConfiguration for RestoreObject
+type RestoreConfiguration struct {
+	XMLName xml.Name `xml:"RestoreRequest"`
+	Days    int32    `xml:"Days,omitempty"`
+	Tier    string   `xml:"JobParameters>Tier,omitempty"`
+}
+
+// AsyncFetchTaskConfiguration for SetBucketAsyncFetchTask
+type AsyncFetchTaskConfiguration struct {
+	XMLName       xml.Name `xml:"AsyncFetchTaskConfiguration"`
+	Url           string   `xml:"Url,omitempty"`
+	Object        string   `xml:"Object,omitempty"`
+	Host          string   `xml:"Host,omitempty"`
+	ContentMD5    string   `xml:"ContentMD5,omitempty"`
+	Callback      string   `xml:"Callback,omitempty"`
+	StorageClass  string   `xml:"StorageClass,omitempty"`
+	IgnoreSameKey bool     `xml:"IgnoreSameKey"`
+}
+
+// AsyncFetchTaskResult for the SetBucketAsyncFetchTask result
+type AsyncFetchTaskResult struct {
+	XMLName xml.Name `xml:"AsyncFetchTaskResult"`
+	TaskId  string
`xml:"TaskId,omitempty"` +} + +// AsynFetchTaskInfo for GetBucketAsyncFetchTask result +type AsynFetchTaskInfo struct { + XMLName xml.Name `xml:"AsyncFetchTaskInfo"` + TaskId string `xml:"TaskId,omitempty"` + State string `xml:"State,omitempty"` + ErrorMsg string `xml:"ErrorMsg,omitempty"` + TaskInfo AsyncTaskInfo `xml:"TaskInfo,omitempty"` +} + +// AsyncTaskInfo for async task information +type AsyncTaskInfo struct { + XMLName xml.Name `xml:"TaskInfo"` + Url string `xml:"Url,omitempty"` + Object string `xml:"Object,omitempty"` + Host string `xml:"Host,omitempty"` + ContentMD5 string `xml:"ContentMD5,omitempty"` + Callback string `xml:"Callback,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + IgnoreSameKey bool `xml:"IgnoreSameKey"` +} + +// InitiateWormConfiguration define InitiateBucketWorm configuration +type InitiateWormConfiguration struct { + XMLName xml.Name `xml:"InitiateWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// ExtendWormConfiguration define ExtendWormConfiguration configuration +type ExtendWormConfiguration struct { + XMLName xml.Name `xml:"ExtendWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// WormConfiguration define WormConfiguration +type WormConfiguration struct { + XMLName xml.Name `xml:"WormConfiguration"` + WormId string `xml:"WormId,omitempty"` + State string `xml:"State,omitempty"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days + CreationDate string `xml:"CreationDate,omitempty"` +} + +// TransferAccConfiguration define transfer acceleration configuration +type TransferAccConfiguration struct { + XMLName xml.Name `xml:"TransferAccelerationConfiguration"` + Enabled bool `xml:"Enabled"` +} + +// ReplicationXML defines simple replication xml, and ReplicationXML is used for "DeleteBucketReplication" in client.go +type ReplicationXML struct { + XMLName xml.Name `xml:"ReplicationRules"` + ID string `xml:"ID,omitempty"` +} + +// CnameConfigurationXML define cname configuration +type CnameConfigurationXML struct { + XMLName xml.Name `xml:"BucketCnameConfiguration"` + Domain string `xml:"Cname>Domain"` +} + +// CnameTokenXML define cname token information +type CnameTokenXML struct { + XMLName xml.Name `xml:"CnameToken"` + Bucket string `xml:"Bucket,omitempty"` + Cname string `xml:"Cname,omitempty"` + Token string `xml:"Token,omitempty"` + ExpireTime string `xml:"ExpireTime,omitempty"` +} + +// CreateBucketCnameTokenResult defines result object for CreateBucketCnameToken request +type CreateBucketCnameTokenResult CnameTokenXML + +// GetBucketCnameTokenResult defines result object for GetBucketCnameToken request +type GetBucketCnameTokenResult CnameTokenXML diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go new file mode 100644 index 0000000..53c1a68 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go @@ -0,0 +1,552 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "time" +) + +// UploadFile is multipart file upload. +// +// objectKey the object name. +// filePath the local file path to upload. +// partSize the part size in byte. +// options the options for uploading object. +// +// error it's nil if the operation succeeds, otherwise it's an error object. 
+func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
+ if partSize < MinPartSize || partSize > MaxPartSize {
+ return errors.New("oss: part size invalid range [100KB, 5GB]")
+ }
+
+ cpConf := getCpConfig(options)
+ routines := getRoutines(options)
+
+ if cpConf != nil && cpConf.IsEnable {
+ cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
+ if cpFilePath != "" {
+ return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
+ }
+ }
+
+ return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
+}
+
+func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
+ if cpConf.FilePath == "" && cpConf.DirPath != "" {
+ dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+ absPath, _ := filepath.Abs(srcFile)
+ cpFileName := getCpFileName(absPath, dest, "")
+ cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+ }
+ return cpConf.FilePath
+}
+
+// ----- concurrent upload without checkpoint -----
+
+// getCpConfig gets the checkpoint configuration
+func getCpConfig(options []Option) *cpConfig {
+ cpcOpt, err := FindOption(options, checkpointConfig, nil)
+ if err != nil || cpcOpt == nil {
+ return nil
+ }
+
+ return cpcOpt.(*cpConfig)
+}
+
+// getCpFileName returns the name of the checkpoint file
+func getCpFileName(src, dest, versionId string) string {
+ md5Ctx := md5.New()
+ md5Ctx.Write([]byte(src))
+ srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+ md5Ctx.Reset()
+ md5Ctx.Write([]byte(dest))
+ destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+ if versionId == "" {
+ return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
+ }
+
+ md5Ctx.Reset()
+ md5Ctx.Write([]byte(versionId))
+ versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+ return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
+}
+
+// getRoutines gets the routine count. By default it's 1.
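+// Values outside the range [1, 100] are clamped: anything below 1 falls back
+// to 1 and anything above 100 is capped at 100.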
+func getRoutines(options []Option) int {
+ rtnOpt, err := FindOption(options, routineNum, nil)
+ if err != nil || rtnOpt == nil {
+ return 1
+ }
+
+ rs := rtnOpt.(int)
+ if rs < 1 {
+ rs = 1
+ } else if rs > 100 {
+ rs = 100
+ }
+
+ return rs
+}
+
+// getPayer returns the payer of the request
+func getPayer(options []Option) string {
+ payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
+ if err != nil || payerOpt == nil {
+ return ""
+ }
+ return payerOpt.(string)
+}
+
+// GetProgressListener gets the progress callback
+func GetProgressListener(options []Option) ProgressListener {
+ isSet, listener, _ := IsOptionSet(options, progressListener)
+ if !isSet {
+ return nil
+ }
+ return listener.(ProgressListener)
+}
+
+// uploadPartHook is for testing usage
+type uploadPartHook func(id int, chunk FileChunk) error
+
+var uploadPartHooker uploadPartHook = defaultUploadPart
+
+// defaultUploadPart is the default (no-op) upload part hook
+func defaultUploadPart(id int, chunk FileChunk) error {
+ return nil
+}
+
+// workerArg defines the worker argument structure
+type workerArg struct {
+ bucket *Bucket
+ filePath string
+ imur InitiateMultipartUploadResult
+ options []Option
+ hook uploadPartHook
+}
+
+// defaultUploadProgressListener is a no-op listener used to shield any
+// user-provided ProgressListener in the per-part options
+type defaultUploadProgressListener struct {
+}
+
+// ProgressChanged no-ops
+func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
+}
+
+// worker is the worker goroutine function
+func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+ for chunk := range jobs {
+ if err := arg.hook(id, chunk); err != nil {
+ failed <- err
+ break
+ }
+ var respHeader http.Header
+ p := Progress(&defaultUploadProgressListener{})
+ opts := make([]Option, 0, len(arg.options)+2)
+ opts = append(opts, arg.options...)
+
+ // use defaultUploadProgressListener
+ opts = append(opts, p, GetResponseHeader(&respHeader))
+
+ startT := time.Now().UnixNano() / 1000 / 1000 / 1000
+ part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
+ endT := time.Now().UnixNano() / 1000 / 1000 / 1000
+ if err != nil {
+ arg.bucket.Client.Config.WriteLog(Debug, "upload part error, cost:%d seconds, part number:%d, request id:%s, error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
+ failed <- err
+ break
+ }
+ select {
+ case <-die:
+ return
+ default:
+ }
+ results <- part
+ }
+}
+
+// scheduler feeds all chunks into the jobs channel and closes it when done
+func scheduler(jobs chan FileChunk, chunks []FileChunk) {
+ for _, chunk := range chunks {
+ jobs <- chunk
+ }
+ close(jobs)
+}
+
+// getTotalBytes sums up the sizes of all chunks
+func getTotalBytes(chunks []FileChunk) int64 {
+ var tb int64
+ for _, chunk := range chunks {
+ tb += chunk.Size
+ }
+ return tb
+}
+
+// uploadFile is a concurrent upload, without checkpoint
+func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+ listener := GetProgressListener(options)
+
+ chunks, err := SplitFileByPartSize(filePath, partSize)
+ if err != nil {
+ return err
+ }
+
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+ abortOptions := ChoiceAbortPartOption(options)
+
+ // Initialize the multipart upload
+ imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+ if err != nil {
+ return err
+ }
+
+ jobs := make(chan FileChunk, len(chunks))
+ results := make(chan UploadPart, len(chunks))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ var completedBytes int64
+ totalBytes := getTotalBytes(chunks)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+ publishProgress(listener, event)
+
+ // Start the worker goroutines
+ arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
+ for w := 1; w <= routines; w++ {
+ go worker(w, arg, jobs, results, failed, die)
+ }
+
+ // Schedule the jobs
+ go scheduler(jobs, chunks)
+
+ // Wait for the upload to finish
+ completed := 0
+ parts := make([]UploadPart, len(chunks))
+ for completed < len(chunks) {
+ select {
+ case part := <-results:
+ completed++
+ parts[part.PartNumber-1] = part
+ completedBytes += chunks[part.PartNumber-1].Size
+
+ // RwBytes in the ProgressEvent is 0 because the read/write bytes
+ // have already been reported by teeReader.Read()
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
+ publishProgress(listener, event)
+ case err := <-failed:
+ close(die)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+ publishProgress(listener, event)
+ bucket.AbortMultipartUpload(imur, abortOptions...)
+ return err
+ }
+
+ if completed >= len(chunks) {
+ break
+ }
+ }
+
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+ publishProgress(listener, event)
+
+ // Complete the multipart upload
+ _, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
+ if err != nil {
+ bucket.AbortMultipartUpload(imur, abortOptions...)
+ return err
+ }
+ return nil
+}
+
+// ----- concurrent upload with checkpoint -----
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+type uploadCheckpoint struct {
+ Magic string // Magic
+ MD5 string // Checkpoint file content's MD5
+ FilePath string // Local file path
+ FileStat cpStat // File state
+ ObjectKey string // Key
+ UploadID string // Upload ID
+ Parts []cpPart // All parts of the local file
+}
+
+type cpStat struct {
+ Size int64 // File size
+ LastModified time.Time // File's last modified time
+ MD5 string // Local file's MD5
+}
+
+type cpPart struct {
+ Chunk FileChunk // File chunk
+ Part UploadPart // Uploaded part
+ IsCompleted bool // Upload complete flag
+}
+
+// isValid checks whether the checkpoint data is still usable: it is valid only
+// when the local file has not changed and the checkpoint content matches its MD5.
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+ // Compare the CP's magic number and MD5.
+ cpb := cp
+ cpb.MD5 = ""
+ js, _ := json.Marshal(cpb)
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+ if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+ return false, nil
+ }
+
+ // Check whether the local file has been modified.
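+ // (If the checkpoint turns out to be stale, the caller discards it and
+ // the upload restarts from scratch.)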
+ fd, err := os.Open(filePath)
+ if err != nil {
+ return false, err
+ }
+ defer fd.Close()
+
+ st, err := fd.Stat()
+ if err != nil {
+ return false, err
+ }
+
+ md, err := calcFileMD5(filePath)
+ if err != nil {
+ return false, err
+ }
+
+ // Compare the file size, the file's last modified time and the file's MD5
+ if cp.FileStat.Size != st.Size() ||
+ !cp.FileStat.LastModified.Equal(st.ModTime()) ||
+ cp.FileStat.MD5 != md {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// load loads the checkpoint from the local file
+func (cp *uploadCheckpoint) load(filePath string) error {
+ contents, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(contents, cp)
+ return err
+}
+
+// dump dumps the checkpoint to the local file
+func (cp *uploadCheckpoint) dump(filePath string) error {
+ bcp := *cp
+
+ // Calculate MD5
+ bcp.MD5 = ""
+ js, err := json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+ bcp.MD5 = b64
+
+ // Serialization
+ js, err = json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+
+ // Dump
+ return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// updatePart updates the part status
+func (cp *uploadCheckpoint) updatePart(part UploadPart) {
+ cp.Parts[part.PartNumber-1].Part = part
+ cp.Parts[part.PartNumber-1].IsCompleted = true
+}
+
+// todoParts returns the unfinished parts
+func (cp *uploadCheckpoint) todoParts() []FileChunk {
+ fcs := []FileChunk{}
+ for _, part := range cp.Parts {
+ if !part.IsCompleted {
+ fcs = append(fcs, part.Chunk)
+ }
+ }
+ return fcs
+}
+
+// allParts returns all parts
+func (cp *uploadCheckpoint) allParts() []UploadPart {
+ ps := []UploadPart{}
+ for _, part := range cp.Parts {
+ ps = append(ps, part.Part)
+ }
+ return ps
+}
+
+// getCompletedBytes returns the completed bytes count
+func (cp *uploadCheckpoint) getCompletedBytes() int64 {
+ var completedBytes int64
+ for _, part := range cp.Parts {
+ if part.IsCompleted {
+ completedBytes += part.Chunk.Size
+ }
+ }
+ return completedBytes
+}
+
+// calcFileMD5 calculates the MD5 for the specified local file.
+// Note that the current implementation skips the calculation and returns an
+// empty checksum, so the MD5 comparison in isValid always passes.
+func calcFileMD5(filePath string) (string, error) {
+ return "", nil
+}
+
+// prepare initializes the multipart upload
+func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
+ // Checkpoint data
+ cp.Magic = uploadCpMagic
+ cp.FilePath = filePath
+ cp.ObjectKey = objectKey
+
+ // Local file
+ fd, err := os.Open(filePath)
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ st, err := fd.Stat()
+ if err != nil {
+ return err
+ }
+ cp.FileStat.Size = st.Size()
+ cp.FileStat.LastModified = st.ModTime()
+ md, err := calcFileMD5(filePath)
+ if err != nil {
+ return err
+ }
+ cp.FileStat.MD5 = md
+
+ // Chunks
+ parts, err := SplitFileByPartSize(filePath, partSize)
+ if err != nil {
+ return err
+ }
+
+ cp.Parts = make([]cpPart, len(parts))
+ for i, part := range parts {
+ cp.Parts[i].Chunk = part
+ cp.Parts[i].IsCompleted = false
+ }
+
+ // Initiate the multipart upload
+ imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+ if err != nil {
+ return err
+ }
+ cp.UploadID = imur.UploadID
+
+ return nil
+}
+
+// complete completes the multipart upload and deletes the local CP file
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
+ imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
+ Key: cp.ObjectKey, UploadID: cp.UploadID}
+ _, err := bucket.CompleteMultipartUpload(imur, parts, options...)
+ if err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ return err
+}
+
+// uploadFileWithCp handles concurrent upload with checkpoint
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+ listener := GetProgressListener(options)
+
+ partOptions := ChoiceTransferPartOption(options)
+ completeOptions := ChoiceCompletePartOption(options)
+
+ // Load CP data
+ ucp := uploadCheckpoint{}
+ err := ucp.load(cpFilePath)
+ if err != nil {
+ os.Remove(cpFilePath)
+ }
+
+ // Re-prepare when loading failed or the CP data is invalid
+ valid, err := ucp.isValid(filePath)
+ if err != nil || !valid {
+ if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ }
+
+ chunks := ucp.todoParts()
+ imur := InitiateMultipartUploadResult{
+ Bucket: bucket.BucketName,
+ Key: objectKey,
+ UploadID: ucp.UploadID}
+
+ jobs := make(chan FileChunk, len(chunks))
+ results := make(chan UploadPart, len(chunks))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ completedBytes := ucp.getCompletedBytes()
+
+ // RwBytes in the ProgressEvent is 0 because the read/write bytes have
+ // already been reported by teeReader.Read()
+ event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
+ publishProgress(listener, event)
+
+ // Start the workers
+ arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
+ for w := 1; w <= routines; w++ {
+ go worker(w, arg, jobs, results, failed, die)
+ }
+
+ // Schedule jobs
+ go scheduler(jobs, chunks)
+
+ // Wait for the jobs to finish
+ completed := 0
+ for completed < len(chunks) {
+ select {
+ case part := <-results:
+ completed++
+ ucp.updatePart(part)
+ ucp.dump(cpFilePath)
+ completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
+ event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
+ publishProgress(listener, event)
+ case err := <-failed:
+ close(die)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
+ publishProgress(listener, event)
+ return err
+ }
+
+ if completed >= len(chunks) {
+ break
+ }
+ }
+
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
+ publishProgress(listener, event)
+
+ // Complete the multipart upload
+ err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
+ return err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
new file mode 100644
index 0000000..25657e2
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -0,0 +1,534 @@
+package oss
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "hash/crc64"
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var sys_name string
+var sys_release string
+var sys_machine string
+
+func init() {
+ sys_name = runtime.GOOS
+ sys_release = "-"
+ sys_machine = runtime.GOARCH
+
+ if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+ sys_name = string(bytes.TrimSpace(out))
+ }
+ if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
+ sys_release = string(bytes.TrimSpace(out))
+ }
+ if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
+ sys_machine = string(bytes.TrimSpace(out))
+ }
+}
+
+// userAgent gets the user agent string.
+// It carries the SDK version, the OS information and the Go version
+func userAgent() string {
+ sys := getSysInfo()
+ return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+ sys.release, sys.machine, runtime.Version())
+}
+
+type sysInfo struct {
+ name string // OS name such as windows/Linux
+ release string // OS version such as 2.6.32-220.23.2.ali1089.el5.x86_64
+ machine string // CPU type such as amd64/x86_64
+}
+
+// getSysInfo returns the cached OS name, OS release and CPU type
+func getSysInfo() sysInfo {
+ return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
+}
+
+// GetRangeConfig gets the download range from the options.
+func GetRangeConfig(options []Option) (*UnpackedRange, error) {
+ rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
+ if err != nil || rangeOpt == nil {
+ return nil, err
+ }
+ return ParseRange(rangeOpt.(string))
+}
+
+// UnpackedRange describes a parsed HTTP Range header
+type UnpackedRange struct {
+ HasStart bool // Flag indicates if the start point is specified
+ HasEnd bool // Flag indicates if the end point is specified
+ Start int64 // Start point
+ End int64 // End point
+}
+
+// InvalidRangeError returns an invalid range error
+func InvalidRangeError(r string) error {
+ return fmt.Errorf("InvalidRange %s", r)
+}
+
+// GetRangeString formats an UnpackedRange back into the M-N form
+func GetRangeString(unpackRange UnpackedRange) string {
+ var strRange string
+ if unpackRange.HasStart && unpackRange.HasEnd {
+ strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
+ } else if unpackRange.HasStart {
+ strRange = fmt.Sprintf("%d-", unpackRange.Start)
+ } else if unpackRange.HasEnd {
+ strRange = fmt.Sprintf("-%d", unpackRange.End)
+ }
+ return strRange
+}
+
+// ParseRange parses the various range styles such as bytes=M-N
+func ParseRange(normalizedRange string) (*UnpackedRange, error) {
+ var err error
+ hasStart := false
+ hasEnd := false
+ var start int64
+ var end int64
+
+ // bytes=M-N
+ nrSlice := strings.Split(normalizedRange, "=")
+ if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+
+ // bytes=M-N,X-Y; only the first range is used
+ rSlice := strings.Split(nrSlice[1], ",")
+ rStr := rSlice[0]
+
+ if strings.HasSuffix(rStr, "-") { // M-
+ startStr := rStr[:len(rStr)-1]
+ start, err = strconv.ParseInt(startStr, 10, 64)
+ if err != nil {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ hasStart = true
+ } else if strings.HasPrefix(rStr, "-") { // -N
+ endStr := rStr[1:]
+ end, err = strconv.ParseInt(endStr, 10, 64)
+ if err != nil {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ if end == 0 { // -0
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ hasEnd = true
+ } else { // M-N
+ valSlice := strings.Split(rStr, "-")
+ if len(valSlice) != 2 {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ start, err = strconv.ParseInt(valSlice[0], 10, 64)
+ if err != nil {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ hasStart = true
+ end, err = strconv.ParseInt(valSlice[1], 10, 64)
+ if err != nil {
+ return nil, InvalidRangeError(normalizedRange)
+ }
+ hasEnd = true
+ }
+
+ return &UnpackedRange{hasStart, hasEnd, start, end}, nil
+}
+
+// AdjustRange adjusts the range according to the length of the file and
+// returns the effective start and end positions
+func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
+ if ur == nil {
+ return 0, size
+ }
+
+ if ur.HasStart && ur.HasEnd {
+ start = ur.Start
+ end = ur.End + 1
+ if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
+ start = 0
+ end = size
+ }
+ } else if ur.HasStart {
+ start = ur.Start
+ end = size
+ if ur.Start < 0 || ur.Start >= size {
+ start = 0
+ }
+ } else if ur.HasEnd {
+ start = size - ur.End
+ end = size
+ if ur.End < 0 || ur.End > size {
+ start = 0
+ end = size
+ }
+ }
+ return
+}
+
+// GetNowSec returns the current time as a Unix time, the number of seconds
+// elapsed since January 1, 1970 UTC.
+func GetNowSec() int64 {
+ return time.Now().Unix()
+}
+
+// GetNowNanoSec returns the current time as a Unix time in nanoseconds, the
+// number of nanoseconds elapsed since January 1, 1970 UTC. The result is
+// undefined if the Unix time in nanoseconds cannot be represented by an
+// int64; note that this means the result of calling UnixNano on the zero
+// Time is undefined.
+func GetNowNanoSec() int64 {
+ return time.Now().UnixNano()
+}
+
+// GetNowGMT gets the current time in GMT format.
+func GetNowGMT() string {
+ return time.Now().UTC().Format(http.TimeFormat)
+}
+
+// FileChunk is the file chunk definition
+type FileChunk struct {
+ Number int // Chunk number
+ Offset int64 // Chunk offset
+ Size int64 // Chunk size
+}
+
+// SplitFileByPartNum splits a big file into parts by the given number of
+// parts; it returns the split result when error is nil.
+func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
+ if chunkNum <= 0 || chunkNum > 10000 {
+ return nil, errors.New("oss: chunkNum invalid")
+ }
+
+ file, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ if int64(chunkNum) > stat.Size() {
+ return nil, errors.New("oss: chunkNum invalid")
+ }
+
+ var chunks []FileChunk
+ var chunk = FileChunk{}
+ var chunkN = (int64)(chunkNum)
+ for i := int64(0); i < chunkN; i++ {
+ chunk.Number = int(i + 1)
+ chunk.Offset = i * (stat.Size() / chunkN)
+ if i == chunkN-1 {
+ chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
+ } else {
+ chunk.Size = stat.Size() / chunkN
+ }
+ chunks = append(chunks, chunk)
+ }
+
+ return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into parts by the given part size; it
+// returns the FileChunk slice when error is nil.
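+//
+// For example (a sketch, not additional API surface): a 10 MB file split with
+// a chunkSize of 4 MB yields chunks {1, 0, 4MB} and {2, 4MB, 4MB}, plus a
+// final remainder chunk {3, 8MB, 2MB}.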
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
+ if chunkSize <= 0 {
+ return nil, errors.New("oss: chunkSize invalid")
+ }
+
+ file, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ var chunkN = stat.Size() / chunkSize
+ if chunkN >= 10000 {
+ return nil, errors.New("too many parts, please increase the part size")
+ }
+
+ var chunks []FileChunk
+ var chunk = FileChunk{}
+ for i := int64(0); i < chunkN; i++ {
+ chunk.Number = int(i + 1)
+ chunk.Offset = i * chunkSize
+ chunk.Size = chunkSize
+ chunks = append(chunks, chunk)
+ }
+
+ if stat.Size()%chunkSize > 0 {
+ chunk.Number = len(chunks) + 1
+ chunk.Offset = int64(len(chunks)) * chunkSize
+ chunk.Size = stat.Size() % chunkSize
+ chunks = append(chunks, chunk)
+ }
+
+ return chunks, nil
+}
+
+// GetPartEnd calculates the end position of a part
+func GetPartEnd(begin int64, total int64, per int64) int64 {
+ if begin+per > total {
+ return total - 1
+ }
+ return begin + per - 1
+}
+
+// CrcTable returns the CRC64 table constructed from the ECMA polynomial
+var CrcTable = func() *crc64.Table {
+ return crc64.MakeTable(crc64.ECMA)
+}
+
+// crc32Table returns the CRC32 table constructed from the IEEE polynomial
+var crc32Table = func() *crc32.Table {
+ return crc32.MakeTable(crc32.IEEE)
+}
+
+// ChoiceTransferPartOption selects the valid options supported by UploadPart
+// or DownloadPart
+func ChoiceTransferPartOption(options []Option) []Option {
+ var outOption []Option
+
+ listener, _ := FindOption(options, progressListener, nil)
+ if listener != nil {
+ outOption = append(outOption, Progress(listener.(ProgressListener)))
+ }
+
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ outOption = append(outOption, VersionId(versionId.(string)))
+ }
+
+ trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
+ if trafficLimit != nil {
+ speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
+ outOption = append(outOption, TrafficLimitHeader(speed))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+// ChoiceCompletePartOption selects the valid options supported by
+// CompleteMultipartUpload
+func ChoiceCompletePartOption(options []Option) []Option {
+ var outOption []Option
+
+ listener, _ := FindOption(options, progressListener, nil)
+ if listener != nil {
+ outOption = append(outOption, Progress(listener.(ProgressListener)))
+ }
+
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
+ if acl != nil {
+ outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
+ }
+
+ callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
+ if callback != nil {
+ outOption = append(outOption, Callback(callback.(string)))
+ }
+
+ callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
+ if callbackVar != nil {
+ outOption = append(outOption, CallbackVar(callbackVar.(string)))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
+ if forbidOverWrite != nil {
+ if forbidOverWrite.(string) == "true" {
+ outOption = append(outOption, ForbidOverWrite(true))
+ } else {
+ outOption = append(outOption, ForbidOverWrite(false))
+ }
+ }
+
+ return outOption
+}
+
+// ChoiceAbortPartOption selects the valid options supported by
+// AbortMultipartUpload
+func ChoiceAbortPartOption(options []Option) []Option {
+ var outOption []Option
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+// ChoiceHeadObjectOption selects the valid options supported by HeadObject
+func ChoiceHeadObjectOption(options []Option) []Option {
+ var outOption []Option
+
+ // HTTPHeaderRange is deliberately not selected, so HeadObject sees the whole object length
+ payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+ if payer != nil {
+ outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+ }
+
+ versionId, _ := FindOption(options, "versionId", nil)
+ if versionId != nil {
+ outOption = append(outOption, VersionId(versionId.(string)))
+ }
+
+ respHeader, _ := FindOption(options, responseHeader, nil)
+ if respHeader != nil {
+ outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+ }
+
+ return outOption
+}
+
+// CheckBucketName validates the bucket name
+func CheckBucketName(bucketName string) error {
+ nameLen := len(bucketName)
+ if nameLen < 3 || nameLen > 63 {
+ return fmt.Errorf("bucket name %s length must be between 3 and 63, now is %d", bucketName, nameLen)
+ }
+
+ for _, v := range bucketName {
+ if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
+ return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
+ }
+ }
+ if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
+ return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
+ }
+ return nil
+}
+
+// GetReaderLen returns the content length of known reader types
+func GetReaderLen(reader io.Reader) (int64, error) {
+ var contentLength int64
+ var err error
+ switch v := reader.(type) {
+ case *bytes.Buffer:
+ contentLength = int64(v.Len())
+ case *bytes.Reader:
+ contentLength = int64(v.Len())
+ case *strings.Reader:
+ contentLength = int64(v.Len())
+ case *os.File:
+ fInfo, fError := v.Stat()
+ if fError != nil {
+ err = fmt.Errorf("can't get reader content length, %s", fError.Error())
+ } else {
+ contentLength = fInfo.Size()
+ }
+ case *io.LimitedReader:
+ contentLength = int64(v.N)
+ case *LimitedReadCloser:
+ contentLength = int64(v.N)
+ default:
+ err = fmt.Errorf("can't get reader content length, unknown reader type")
+ }
+ return contentLength, err
+}
+
+// LimitReadCloser wraps r into a LimitedReadCloser that reads at most n bytes
+func LimitReadCloser(r io.Reader, n int64) io.Reader {
+ var lc LimitedReadCloser
+ lc.R = r
+ lc.N = n
+ return &lc
+}
+
+// LimitedReadCloser is an io.LimitedReader that also supports Close()
+type LimitedReadCloser struct {
+ io.LimitedReader
+}
+
+// Close closes the underlying reader when it is an io.ReadCloser
+func (lc *LimitedReadCloser) Close() error {
+ if closer, ok := lc.R.(io.ReadCloser); ok {
+ return closer.Close()
+ }
+ return nil
+}
+
+// DiscardReadCloser drops the first Discard bytes read from RC
+type DiscardReadCloser struct {
+ RC io.ReadCloser
+ Discard int
+}
+
+func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
+ n, err := drc.RC.Read(b)
+ if drc.Discard == 0 || n <= 0 {
+ return n, err
+ }
+
+ if n <= drc.Discard {
+ drc.Discard -= n
+ return 0, err
+ }
+
+ realLen
:= n - drc.Discard + copy(b[0:realLen], b[drc.Discard:n]) + drc.Discard = 0 + return realLen, err +} + +func (drc *DiscardReadCloser) Close() error { + closer, ok := drc.RC.(io.ReadCloser) + if ok { + return closer.Close() + } + return nil +} + +func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) { + for _, key := range keys { + value, ok := params[key] + if ok && value == "" { + // convert "" to nil + params[key] = nil + } + } +} + +func EscapeLFString(str string) string { + var log bytes.Buffer + for i := 0; i < len(str); i++ { + if str[i] != '\n' { + log.WriteByte(str[i]) + } else { + log.WriteString("\\n") + } + } + return log.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000..899129e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 0000000..1c49674 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. 
It often includes an indicator of the type of resource,
+ // for example an IAM user or Amazon RDS database, followed by a slash (/) or a colon (:), followed by the
+ // resource name itself. Some services allow paths for resource names, as described in
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+ Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+ if !strings.HasPrefix(arn, arnPrefix) {
+ return ARN{}, errors.New(invalidPrefix)
+ }
+ sections := strings.SplitN(arn, arnDelimiter, arnSections)
+ if len(sections) != arnSections {
+ return ARN{}, errors.New(invalidSections)
+ }
+ return ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountID: sections[sectionAccountID],
+ Resource: sections[sectionResource],
+ }, nil
+}
+
+// IsARN returns whether the given string is an ARN by looking for
+// whether the string starts with "arn:" and contains the correct number
+// of sections delimited by colons (:).
+func IsARN(arn string) bool {
+ return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+ return arnPrefix +
+ arn.Partition + arnDelimiter +
+ arn.Service + arnDelimiter +
+ arn.Region + arnDelimiter +
+ arn.AccountID + arnDelimiter +
+ arn.Resource
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..99849c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+ awsError
+ Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
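+//
+// A usage sketch (the truncated JSON payload and message are illustrative
+// only, not part of the SDK):
+//
+// raw := []byte(`{"Name":`) // truncated response body
+// var out struct{ Name string }
+// if err := json.Unmarshal(raw, &out); err != nil {
+// uerr := NewUnmarshalError(err, "failed decoding response", raw)
+// fmt.Println(uerr.Error()) // message plus a hex dump of raw
+// }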
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000..9cf7eaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. 
+func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000..1a3d106 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. 
Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If it's not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000..142a7a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual reports whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this function also dereferences the input
+// values if possible, so the comparison does not fail just because one
+// parameter is a pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
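+//
+// For example (a sketch, not part of the package's tests):
+//
+// s := "hello"
+// DeepEqual(&s, "hello") // true: the pointer is dereferenced before comparing
+// DeepEqual(&s, nil) // false: only one of the two values is valid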
+func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000..a4eb6a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000..11d4240 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,123 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
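+//
+// Example (a minimal sketch; the struct and value are illustrative):
+//
+//    type person struct{ Name *string }
+//    fmt.Println(awsutil.Prettify(person{Name: aws.String("Ana")}))
+//    // {
+//    //   Name: "Ana"
+//    // }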
+func Prettify(i interface{}) string {
+	var buf bytes.Buffer
+	prettify(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			ft, ok := v.Type().FieldByName(n)
+			if !ok {
+				panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				prettify(val, indent+2, buf)
+			}
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..3f7cffd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,90 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+//
+// Deprecated: Use Prettify instead.
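+//
+// A minimal usage sketch (the struct here is illustrative):
+//
+//    type tag struct{ Key string }
+//    s := awsutil.StringValue(tag{Key: "env"})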
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 0000000..b147f10
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config         *aws.Config
+	Handlers       request.Handlers
+	PartitionID    string
+	Endpoint       string
+	SigningRegion  string
+	SigningName    string
+	ResolvedRegion string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata
+	// the service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
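+//
+// Service clients such as s3.S3 embed this type and are the usual way to
+// obtain one; it is rarely constructed directly. A minimal sketch:
+//
+//    sess := session.Must(session.NewSession())
+//    svc := s3.New(sess) // builds a *client.Client internally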
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = DefaultRetryerMaxNumRetries
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000..9f6af19
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, you can
+// implement the request.Retryer interface.
+type DefaultRetryer struct {
+	// NumMaxRetries is the maximum number of retries that will be performed.
+	// By default, this is zero.
+	NumMaxRetries int
+
+	// MinRetryDelay is the minimum retry delay after which retry will be performed.
+	// If not set, the value is 0ns.
+	MinRetryDelay time.Duration
+
+	// MinThrottleDelay is the minimum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay before which retry must be performed.
+	// If not set, the value is 0ns.
+	MaxRetryDelay time.Duration
+
+	// MaxThrottleDelay is the maximum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 0000000..5ac5c24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,206 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. 
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
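+//
+// A sketch of opting in to header-only response logging by replacing the
+// default handler (setup shown is illustrative):
+//
+//    svc := s3.New(sess, aws.NewConfig().WithLogLevel(aws.LogDebug))
+//    svc.Handlers.Send.RemoveByName(client.LogHTTPResponseHandler.Name)
+//    svc.Handlers.Send.PushBackNamed(client.LogHTTPResponseHeaderHandler)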
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+	Name: "awssdk.client.LogResponseHeader",
+	Fn:   logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+		return
+	}
+
+	b, err := httputil.DumpResponse(r.HTTPResponse, false)
+	if err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000..a7530eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,15 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName    string
+	ServiceID      string
+	APIVersion     string
+	PartitionID    string
+	Endpoint       string
+	SigningName    string
+	SigningRegion  string
+	JSONVersion    string
+	TargetPrefix   string
+	ResolvedRegion string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 0000000..881d575
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request; for NoOpRetryer this is always zero.
+func (d NoOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..4818ea4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,631 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//     // Create Session with MaxRetries configuration to be shared by multiple
+//     // service clients.
+//     sess := session.Must(session.NewSession(&aws.Config{
+//         MaxRetries: aws.Int(3),
+//     }))
+//
+//     // Create S3 service client with a specific Region.
+//     svc := s3.New(sess, &aws.Config{
+//         Region: aws.String("us-west-2"),
+//     })
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `nil` or to `""` to use the default generated endpoint.
+	//
+	// Note: You must still provide a `Region` value when specifying an
+	// endpoint for a client.
+	Endpoint *string
+
+	// The resolver to use for looking up endpoints for AWS service clients
+	// to use based on region.
+	EndpointResolver endpoints.Resolver
+
+	// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+	// ShouldRetry regardless of whether request.Retryable is set. This will
+	// utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+	// is not set, then ShouldRetry will only be called if request.Retryable is nil.
+	// Proper handling of the request.Retryable field is important when setting
+	// this field.
+	EnforceShouldRetryCheck *bool
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+	// Regions and Endpoints.
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service
+	// specific configuration.
+	MaxRetries *int
+
+	// Retryer guides how HTTP requests should be retried in case of
+	// recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the client.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//     cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for
+	// missing required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// Note: This configuration option is specific to the Amazon S3 service.
+	//
+	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	// for Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. Operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests that are not compatible will fall back to normal
+	// S3 requests.
+	//
+	// The bucket must be enabled for accelerate to be used with an S3 client
+	// with accelerate enabled. If the bucket is not enabled for accelerate
+	// an error will be returned. The bucket name must also be DNS compatible
+	// to work with accelerate.
+	S3UseAccelerate *bool
+
+	// S3DisableContentMD5Validation config option is temporarily disabled,
+	// For S3 GetObject API calls, #1837.
+	//
+	// Set this to `true` to disable the S3 service client from automatically
+	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+	// will also disable the SDK from performing object ContentMD5 validation
+	// on GetObject API calls.
+	S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to have the S3 service client use the region specified
+	// in the ARN, when an ARN is provided as an argument to a bucket parameter.
+	S3UseARNRegion *bool
+
+	// Set this to `true` to enable the SDK to unmarshal API response header maps to
+	// normalized lower case map keys.
+	//
+	// For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
+	// Metadata member's map keys. The value of the header in the map is unaffected.
+	//
+	// The AWS SDK for Go v2 uses lower case header maps by default. The v1
+	// SDK provides this opt-in for this option, for backwards compatibility.
+	LowerCaseHeaderMaps *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.NewSession() in order to disable
+	// the EC2Metadata overriding the timeout for default credentials chain.
+	//
+	// Example:
+	//    sess := session.Must(session.NewSession(aws.NewConfig()
+	//       .WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//    svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint to be generated for a service client to
+	// be the dual stack endpoint. The dual stack endpoint will support
+	// both IPv4 and IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session,
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided, the UseDualStack flag
+	// will be ignored.
+	//
+	// Only supported when used with a session, for example:
+	//
+	//     sess := session.Must(session.NewSession())
+	//
+	//     svc := s3.New(sess, &aws.Config{
+	//         UseDualStack: aws.Bool(true),
+	//     })
+	//
+	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
+	UseDualStack *bool
+
+	// Sets the resolver to resolve a dual-stack endpoint for the service.
+	UseDualStackEndpoint endpoints.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint endpoints.FIPSEndpointState
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling retry
+	// delay of an API operation. It is recommended to not use SleepDelay at all
+	// and specify a Retryer instead.
+	SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+	// Will default to false. This would only be used for empty directory names in s3 requests.
+	//
+	// Example:
+	//    sess := session.Must(session.NewSession(&aws.Config{
+	//        DisableRestProtocolURICleaning: aws.Bool(true),
+	//    }))
+	//
+	//    svc := s3.New(sess)
+	//    out, err := svc.GetObject(&s3.GetObjectInput {
+	//        Bucket: aws.String("bucketname"),
+	//        Key: aws.String("//foo//bar//moo"),
+	//    })
+	DisableRestProtocolURICleaning *bool
+
+	// EnableEndpointDiscovery will allow for endpoint discovery on operations that
+	// have the definition in its model. By default, endpoint discovery is off.
+	// To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
+	//
+	// Example:
+	//    sess := session.Must(session.NewSession(&aws.Config{
+	//        EnableEndpointDiscovery: aws.Bool(true),
+	//    }))
+	//
+	//    svc := s3.New(sess)
+	//    out, err := svc.GetObject(&s3.GetObjectInput {
+	//        Bucket: aws.String("bucketname"),
+	//        Key: aws.String("/foo/bar/moo"),
+	//    })
+	EnableEndpointDiscovery *bool
+
+	// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+	// request endpoint hosts with modeled information.
+	//
+	// Disabling this feature is useful when you want to use local endpoints
+	// for testing that do not support the modeled host prefix pattern.
+	DisableEndpointHostPrefix *bool
+
+	// STSRegionalEndpoint will enable regional or legacy endpoint resolving
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//     // Create Session with MaxRetries configuration to be shared by multiple
+//     // service clients.
+//     sess := session.Must(session.NewSession(aws.NewConfig().
+//         WithMaxRetries(3),
+//     ))
+//
+//     // Create S3 service client with a specific Region.
+//     svc := s3.New(sess, aws.NewConfig().
+//         WithRegion("us-west-2"),
+//     )
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
+// returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+	c.EndpointResolver = resolver
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+	c.MaxRetries = &max
+	return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+	c.DisableParamValidation = &disable
+	return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+	c.DisableComputeChecksums = &disable
+	return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+	c.LogLevel = &level
+	return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+	c.Logger = logger
+	return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
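+//
+// Example (a minimal sketch):
+//
+//    sess := session.Must(session.NewSession(
+//        aws.NewConfig().WithS3ForcePathStyle(true)))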
+func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + +// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value +// returning a Config pointer for chaining. +func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { + c.LowerCaseHeaderMaps = &t + return c +} + +// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value +// returning a Config pointer for chaining. +func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { + c.DisableRestProtocolURICleaning = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
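+//
+// Example (a minimal sketch):
+//
+//    base := aws.NewConfig().WithRegion("us-west-2")
+//    base.MergeIn(&aws.Config{MaxRetries: aws.Int(3)})
+//    // base now carries both the Region and MaxRetries settings.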
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } + + if other.LowerCaseHeaderMaps != nil { + dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps + } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + + if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset { + dst.UseFIPSEndpoint = other.UseFIPSEndpoint + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
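+//
+// Example (a minimal sketch):
+//
+//    base := aws.NewConfig().WithRegion("us-west-2")
+//    override := base.Copy(aws.NewConfig().WithMaxRetries(5))
+//    // base is unchanged; override has both Region and MaxRetries set.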
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 0000000..89aad2c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,38 @@
+//go:build !go1.9
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 0000000..6ee9ddd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,12 @@
+//go:build go1.9
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 0000000..3132181
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,23 @@
+//go:build !go1.7
+// +build !go1.7
+
+package aws
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
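+//
+// Example (a minimal sketch):
+//
+//    err := aws.SleepWithContext(aws.BackgroundContext(), time.Second)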
+func BackgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 0000000..9975d56
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 0000000..304fd15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait until the timer duration expires or the context
+// is canceled, whichever happens first. If the context is canceled, the
+// Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000..4e076c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,918 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
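+//
+// Example (a minimal sketch; the same pattern applies to the other pointer
+// helpers in this file):
+//
+//    cfg := &aws.Config{DisableSSL: aws.Bool(true)}
+//    enabled := aws.BoolValue(cfg.DisableSSL) // true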
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+	dst := make([]*int8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+	dst := make([]int8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+	dst := make(map[string]*int8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+	dst := make(map[string]int8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. +func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
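+//
+// Example (editor's illustration, not part of the upstream file):
+//
+//     ms := aws.TimeUnixMilli(time.Unix(1, 500*int64(time.Millisecond)))
+//     // ms == 1500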
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 0000000..36a915e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,232 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
+var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", r.Error) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000..7d50b15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 0000000..ab69c7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. 
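+//
+// The appended User-Agent fragment has the form (values illustrative, not
+// from the upstream file):
+//
+//     aws-sdk-go/1.44.0 (go1.18; linux; amd64)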
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000..3ad1e79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. 
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 0000000..6e3406b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,23 @@
+//go:build !go1.7
+// +build !go1.7
+
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 0000000..a68df0e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
new file mode 100644
index 0000000..0345fab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -0,0 +1,40 @@
+//go:build !go1.9
+// +build !go1.9
+
+package credentials
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// This type, aws.Context, and context.Context are equivalent.
+// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 0000000..79018ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,14 @@ +//go:build go1.9 +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 0000000..a880a3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,383 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. 
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials

+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/sync/singleflight"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials object can be used to configure a service to not sign
+// requests when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//   Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+	Provider
+
+	RetrieveWithContext(Context) (Value, error)
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+	// The time at which the credentials are no longer valid
+	ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an
+// error. It is used by the SDK when constructing a known provider is not
+// possible due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the returned Value
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
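+//
+// Example (editor's illustration, not part of the upstream file):
+//
+//     p := credentials.ErrorProvider{Err: errors.New("no provider configured"), ProviderName: "Stub"}
+//     _, err := p.Retrieve() // err is always the wrapped Err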
+func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + // Passed in expirations should have the monotonic clock values stripped. + // This ensures time comparisons will be based on wall-time. + e.expiration = expiration.Round(0) + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sf singleflight.Group + + m sync.RWMutex + creds Value + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. 
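+	// asyncIsExpired sends the cached credentials only while they are still
+	// valid, so this select either returns the cache hit or bails out early
+	// if the caller's context is canceled.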
+	select {
+	case curCreds, ok := <-c.asyncIsExpired():
+		// ok will only be true if the credentials were not expired. ok will
+		// be false and have no value if the credentials are expired.
+		if ok {
+			return curCreds, nil
+		}
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+
+	// Cannot pass context down to the actual retrieve, because the first
+	// context would cancel the whole group when there is no direct
+	// association of items in the group.
+	resCh := c.sf.DoChan("", func() (interface{}, error) {
+		return c.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Value), res.Err
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+}
+
+func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+		return curCreds, nil
+	}
+
+	var creds Value
+	var err error
+	if p, ok := c.provider.(ProviderWithContext); ok {
+		creds, err = p.RetrieveWithContext(ctx)
+	} else {
+		creds, err = c.provider.Retrieve()
+	}
+	if err == nil {
+		c.creds = creds
+	}
+
+	return creds, err
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+	return c.GetWithContext(backgroundContext())
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	c.creds = Value{}
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+	c.m.RLock()
+	defer c.m.RUnlock()
+
+	return c.isExpiredLocked(c.creds)
+}
+
+// asyncIsExpired returns a channel of credentials Value. If the credentials
+// are not expired the current value is sent on the channel before it is
+// closed; if they are expired the channel is closed without a value.
+func (c *Credentials) asyncIsExpired() <-chan Value {
+	ch := make(chan Value, 1)
+	go func() {
+		c.m.RLock()
+		defer c.m.RUnlock()
+
+		if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+			ch <- curCreds
+		}
+
+		close(ch)
+	}()
+
+	return ch
+}
+
+// isExpiredLocked helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpiredLocked(creds interface{}) bool {
+	return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
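+//
+// Example (editor's illustration, not part of the upstream file; creds is
+// an assumed *Credentials value):
+//
+//     if t, err := creds.ExpiresAt(); err == nil {
+//         fmt.Println("credentials expire at", t)
+//     }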
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+	c.m.RLock()
+	defer c.m.RUnlock()
+
+	expirer, ok := c.provider.(Expirer)
+	if !ok {
+		return time.Time{}, awserr.New("ProviderNotExpirer",
+			fmt.Sprintf("provider %s does not support ExpiresAt()",
+				c.creds.ProviderName),
+			nil)
+	}
+	if c.creds == (Value{}) {
+		// set expiration time to the distant past
+		return time.Time{}, nil
+	}
+	return expirer.ExpiresAt(), nil
+}
+
+type suppressedContext struct {
+	Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 0000000..92af5b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,188 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint or ExpiryWindow:
+//
+//     p := &ec2rolecreds.EC2RoleProvider{
+//         // Pass in a custom timeout to be used when requesting
+//         // IAM EC2 Role credentials.
+//         Client: ec2metadata.New(sess, aws.Config{
+//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
+//         }),
+//
+//         // Do not use early expiry of credentials. If a non-zero value is
+//         // specified the credentials will be expired early
+//         ExpiryWindow: 0,
+//     }
+type EC2RoleProvider struct {
+	credentials.Expiry
+
+	// Required EC2Metadata client to use when connecting to EC2 metadata service.
+	Client *ec2metadata.EC2Metadata
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
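+//
+// Example (editor's illustration, not part of the upstream file; sess is an
+// assumed *session.Session):
+//
+//     creds := ec2rolecreds.NewCredentialsWithClient(ec2metadata.New(sess))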
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	return m.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	credsList, err := requestCredList(ctx, m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for the named role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 0000000..785f30d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. 
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// Optional authorization token value. If set, it will be used as the
+	// value of the Authorization header of the endpoint credential request.
+	AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+	p := &Provider{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: "CredentialsEndpoint",
+				Endpoint:    endpoint,
+			},
+			handlers,
+		),
+	}
+
+	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+	p.Client.Handlers.Validate.Clear()
+	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// NewCredentialsClient returns a pointer to a new Credentials object
+// wrapping the endpoint credentials Provider.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
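+//
+// Example (editor's illustration, not part of the upstream file; the
+// endpoint URL is hypothetical):
+//
+//     creds := endpointcreds.NewCredentialsClient(cfg, handlers,
+//         "http://127.0.0.1:8080/credentials")
+//     v, err := creds.Get()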
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.SetContext(ctx)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+	if authToken := p.AuthorizationToken; len(authToken) != 0 {
+		req.HTTPRequest.Header.Set("Authorization", authToken)
+	}
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization,
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to decode error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error.
+	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..54c5cf7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
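+//
+// A minimal usage sketch, assuming the variables listed below are already
+// exported in the process environment:
+//
+//	creds := credentials.NewEnvCredentials()
+//	value, err := creds.Get()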
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000..7fc91d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 0000000..e624836
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,426 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to set up your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+	[default]
+	credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+	// Initialize a session to load credentials.
+	sess, _ := session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1")},
+	)
+
+	// Create S3 service client to use the credentials.
+	svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`processcreds.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentials("/path/to/command")
+
+	// Create service client value configured for credentials.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(1) * time.Minute)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+	creds := processcreds.NewCredentials(
+		"/path/to/command",
+		func(opt *ProcessProvider) {
+			opt.Timeout = time.Duration(2) * time.Minute
+			opt.Duration = time.Duration(60) * time.Minute
+			opt.MaxBufSize = 2048
+		})
+
+You can also use your own `exec.Cmd`:
+
+	// Create an exec.Cmd
+	myCommand := exec.Command("/path/to/command")
+
+	// Create credentials using your exec.Cmd and custom timeout
+	creds := processcreds.NewCredentialsCommand(
+		myCommand,
+		func(opt *processcreds.ProcessProvider) {
+			opt.Timeout = time.Duration(1) * time.Second
+		})
+*/
+package processcreds
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// ErrCodeProcessProviderParse error parsing process output
+	ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+	// ErrCodeProcessProviderVersion version error in output
+	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+	// ErrCodeProcessProviderRequired required attribute missing in output
+	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+	// ErrCodeProcessProviderExecution execution of command failed
+	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+	// errMsgProcessProviderTimeout process took longer than allowed
+	errMsgProcessProviderTimeout = "credential process timed out"
+
+	// errMsgProcessProviderProcess process error
+	errMsgProcessProviderProcess = "error in credential_process"
+
+	// errMsgProcessProviderParse problem parsing output
+	errMsgProcessProviderParse = "parse failed of credential_process output"
+
+	// errMsgProcessProviderVersion version error in output
+	errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time in minutes that the
+	// credentials will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = int(8 * sdkio.KibiByte)
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The exec.Cmd for the OS command that should return JSON with
+	// credential information.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    exec.Command(command),
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+	p := NewCredentials(command, func(opt *ProcessProvider) {
+		opt.Timeout = timeout
+	})
+
+	return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    command,
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+	Version         int
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	SessionToken    string
+	Expiration      *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+	out, err := p.executeCredentialProcess()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	// Deserialize and validate the response
+	resp := &credentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderParse,
+			fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+			err)
+	}
+
+	if resp.Version != 1 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderVersion,
+			errMsgProcessProviderVersion,
+			nil)
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderRequired,
+			errMsgProcessProviderMissKey,
+			nil)
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New(
+			ErrCodeProcessProviderRequired,
+			errMsgProcessProviderMissSecret,
+			nil)
+	}
+
+	// Handle expiration
+	p.staticCreds = resp.Expiration == nil
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	}
+
+	return credentials.Value{
+		ProviderName:    ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+	}, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
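+//
+// For illustration (the path is a placeholder), a command of
+// "/path/to/command" ends up being executed through the platform shell,
+// roughly as:
+//
+//	sh -c "/path/to/command"       // Linux/macOS
+//	cmd.exe /C "/path/to/command"  // Windows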
+func (p *ProcessProvider) prepareCommand() error {
+
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(p.originalCommand) == 0 {
+		p.originalCommand = make([]string, len(p.command.Args))
+		copy(p.originalCommand, p.command.Args)
+
+		// Check for an empty command, because exec.Command succeeds even
+		// when given an empty string.
+		if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+			return awserr.New(
+				ErrCodeProcessProviderExecution,
+				fmt.Sprintf(
+					"%s: %s",
+					errMsgProcessProviderPrepareCmd,
+					errMsgProcessProviderEmptyCmd),
+				nil)
+		}
+	}
+
+	cmdArgs = append(cmdArgs, p.originalCommand...)
+	p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+	p.command.Env = os.Environ()
+
+	return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+	if err := p.prepareCommand(); err != nil {
+		return nil, err
+	}
+
+	// Setup the pipes
+	outReadPipe, outWritePipe, err := os.Pipe()
+	if err != nil {
+		return nil, awserr.New(
+			ErrCodeProcessProviderExecution,
+			errMsgProcessProviderPipe,
+			err)
+	}
+
+	p.command.Stderr = os.Stderr    // display stderr on console for MFA
+	p.command.Stdout = outWritePipe // get creds json on process's stdout
+	p.command.Stdin = os.Stdin      // enable stdin for MFA
+
+	output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+	stdoutCh := make(chan error, 1)
+	go readInput(
+		io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+		output,
+		stdoutCh)
+
+	execCh := make(chan error, 1)
+	go executeCommand(*p.command, execCh)
+
+	finished := false
+	var errors []error
+	for !finished {
+		select {
+		case readError := <-stdoutCh:
+			errors = appendError(errors, readError)
+			finished = true
+		case execError := <-execCh:
+			err := outWritePipe.Close()
+			errors = appendError(errors, err)
+			errors = appendError(errors, execError)
+			if errors != nil {
+				return output.Bytes(), awserr.NewBatchError(
+					ErrCodeProcessProviderExecution,
+					errMsgProcessProviderProcess,
+					errors)
+			}
+		case <-time.After(p.Timeout):
+			finished = true
+			return output.Bytes(), awserr.NewBatchError(
+				ErrCodeProcessProviderExecution,
+				errMsgProcessProviderTimeout,
+				errors) // errors can be nil
+		}
+	}
+
+	out := output.Bytes()
+
+	if runtime.GOOS == "windows" {
+		// windows adds slashes to quotes
+		out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+	}
+
+	return out, nil
+}
+
+// appendError conveniently checks for nil before appending to the slice
+func appendError(errors []error, err error) []error {
+	if err != nil {
+		return append(errors, err)
+	}
+	return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+	// Start the command
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+	tee := io.TeeReader(r, w)
+
+	_, err := ioutil.ReadAll(tee)
+
+	if err == io.EOF {
+		err = nil
+	}
+
+	read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..22b5c5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	
"github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.OpenFile(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+	}
+
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			nil)
+	}
+
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			nil)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.String("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
new file mode 100644
index 0000000..18c940a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
@@ -0,0 +1,60 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file
+// by specifying the required keys in the profile:
+//
+//	sso_account_id
+//	sso_region
+//	sso_role_name
+//	sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_role_name = SSOReadOnlyRole
+//	sso_region = us-east-1
+//	sso_account_id = 123456789012
+//
+// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//		SharedConfigState: session.SharedConfigEnable,
+//		Profile:           "devsso",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+//
+//	svc := sso.New(sess, &aws.Config{
+//		Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
+//	})
+//
+//	provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
+//
+//	credentials, err := provider.Get()
+//	if err != nil {
+//		return err
+//	}
+//
+// Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
new file mode 100644
index 0000000..d4df39a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
@@ -0,0 +1,10 @@
+//go:build !windows
+// +build !windows
+
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+	return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
new file mode 100644
index 0000000..eb48f61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
@@ -0,0 +1,7 @@
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+	return os.Getenv("USERPROFILE")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
new file mode 100644
index 0000000..6eda2a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
@@ -0,0 +1,180 @@
+package ssocreds
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	
"github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. + StartURL string +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. 
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + tokenFile, err := loadTokenFile(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: &tokenFile.AccessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCacheFileName(url string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(url)) + if err != nil { + return "", err + } + return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil +} + +type rfc3339 time.Time + +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + + if err := json.Unmarshal(bytes, &value); err != nil { + return err + } + + parse, err := time.Parse(time.RFC3339, value) + if err != nil { + return fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + *r = rfc3339(parse) + + return nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(startURL string) (t token, err error) { + key, err := getCacheFileName(startURL) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if len(t.AccessToken) == 0 { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + if t.Expired() { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + return t, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000..cbba1e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. 
Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provided. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000..260a37c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,367 @@
+/*
+Package stscreds provides credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API call.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want to have direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+type assumeRolerWithContext interface {
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
+	Tags []*sts.Tag
+
+	// A list of keys for session tags that you want to set as transitive.
+	// If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
+	TransitiveTagKeys []*string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
+	// set an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With a MaxJitterFrac value of 0 (the default), no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
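+//
+// A short usage sketch (the role ARN is a placeholder, and sess is assumed to
+// be an existing session.Session):
+//
+//	svc := sts.New(sess)
+//	creds := stscreds.NewCredentialsWithClient(svc, "myRoleArn")
+//	value, err := creds.Get()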
+// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 0000000..19ad619
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,182 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare test values.
+var now = time.Now
+
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	// The policy ARNs to use with the web identity assumed role.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// Duration the STS credentials will be valid for. Truncated to seconds.
+	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+	// expiry duration. See
+	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+	// for more information.
+	Duration time.Duration
+
+	// The amount of time the credentials will be refreshed before they expire.
+	// This is useful to refresh credentials before they expire, reducing the
+	// risk of using credentials as they expire. If unset, will default to no
+	// expiry window.
+	ExpiryWindow time.Duration
+
+	client stsiface.STSAPI
+
+	tokenFetcher    TokenFetcher
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options, and wrap with credentials.NewCredentials helper.
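+//
+// A sketch of the recommended replacement (role ARN, session name, and token
+// path are placeholders):
+//
+//	p := stscreds.NewWebIdentityRoleProviderWithOptions(
+//		sts.New(sess), "roleARN", "sessionName",
+//		stscreds.FetchTokenPath("/path/to/token"))
+//	creds := credentials.NewCredentials(p)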
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
+}
+
+// NewWebIdentityRoleProviderWithOptions will return an initialized
+// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
+// TokenFetcher. Additional options can be provided as functional options.
+//
+// TokenFetcher is the implementation that will retrieve the JWT token used to
+// assume the role. Use the provided FetchTokenPath implementation to
+// retrieve the JWT token using a file system path.
+func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
+	p := WebIdentityRoleProvider{
+		client:          svc,
+		tokenFetcher:    tokenFetcher,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+
+	for _, fn := range optFns {
+		fn(&p)
+	}
+
+	return &p
+}
+
+// Retrieve will attempt to assume a role from a token which is located at the
+// destination specified by 'WebIdentityTokenFilePath'; if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role from a token which is
+// located at the destination specified by 'WebIdentityTokenFilePath'; if that
+// is empty an error will be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	b, err := p.tokenFetcher.FetchToken(ctx)
+	if err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+
+	var duration *int64
+	if p.Duration != 0 {
+		duration = aws.Int64(int64(p.Duration / time.Second))
+	}
+
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.PolicyArns,
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+		DurationSeconds:  duration,
+	})
+
+	req.SetContext(ctx)
+
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming a role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 0000000..25a66d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,69 @@
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
+//
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//     * AWS_CSM_PORT=<port number>
+//       The port number the CSM agent will receive metrics on.
+//
+//     * AWS_CSM_HOST=<hostname or ip>
+//       The hostname, or IP address the CSM agent will receive metrics on.
+//       Without the port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with a different ClientID or address it will panic.
+//
+//     r, err := csm.Start("clientID", ":31000")
+//     if err != nil {
+//         panic(fmt.Errorf("failed starting CSM: %v", err))
+//     }
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+//     sess, err := session.NewSession(&aws.Config{})
+//     if err != nil {
+//         panic(fmt.Errorf("failed loading session: %v", err))
+//     }
+//
+//     // Add CSM client's metric publishing request handlers to the SDK's
+//     // Session Configuration.
+//     r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//     // Get the CSM client Reporter.
+//     r := csm.Get()
+//
+//     // Will pause monitoring
+//     r.Pause()
+//     resp, err = client.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key:    aws.String("key"),
+//     })
+//
+//     // Resume monitoring
+//     r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 0000000..4b19e28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once, and will panic if a different
+// client ID or port is passed in.
+//
+//     r, err := csm.Start("clientID", "127.0.0.1:31000")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//     sess := session.NewSession()
+//     r.InjectHandlers(&sess.Handlers)
+//
+//     svc := s3.New(sess)
+//     out, err := svc.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key:    aws.String("key"),
+//     })
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
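+//
+// A minimal usage sketch (illustrative):
+//
+//	if r := csm.Get(); r != nil {
+//		r.Pause()
+//	}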
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 0000000..5bacc79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.SDKException = aws.String(te.exception)
+		m.SDKExceptionMessage = aws.String(te.message)
+	}
+}
+
+func (m *metric) SetFinalException(e metricException) {
+	switch te := e.(type) {
+	case awsException:
+		m.FinalAWSException = aws.String(te.exception)
+		m.FinalAWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.FinalSDKException = aws.String(te.exception)
+		m.FinalSDKExceptionMessage = aws.String(te.message)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 0000000..82a3e34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,55 @@
+package csm
+
+import (
+	"sync/atomic"
+)
+
+const (
+	runningEnum = iota
+	pausedEnum
+)
+
+var (
+	// MetricsChannelSize is the maximum number of metrics to hold in the channel
+	MetricsChannelSize = 100
+)
+
+type metricChan struct {
+	ch     chan metric
+	paused *int64
+}
+
+func newMetricChan(size int) metricChan {
+	return metricChan{
+		ch:     make(chan metric, size),
+		paused: new(int64),
+	}
+}
+
+func (ch *metricChan) Pause() {
+	atomic.StoreInt64(ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+	atomic.StoreInt64(ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+	v := atomic.LoadInt64(ch.paused)
+	return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused
+func (ch *metricChan) Push(m metric) bool {
+	if ch.IsPaused() {
+		return false
+	}
+
+	select {
+	case ch.ch <- m:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 0000000..54a9928
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
+type metricException interface {
+	Exception() string
+	Message() string
+}
+
+type requestException struct {
+	exception string
+	message   string
+}
+
+func (e requestException) Exception() string {
+	return e.exception
+}
+func (e requestException) Message() string {
+	return e.message
+}
+
+type awsException struct {
+	requestException
+}
+
+type sdkException struct {
+	requestException
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 0000000..835bcd4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,264 @@
+package csm
+
+import (
+	"encoding/json"
+	"net"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
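+//
+// Metrics are serialized as JSON and written over a UDP connection to the
+// configured CSM agent address (see connect and start below).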
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue it can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause it can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
+//     // Start must be called in order to inject the correct handlers
+//     r, err := csm.Start("clientID", "127.0.0.1:8094")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//
+//     sess := session.NewSession()
+//     r.InjectHandlers(&sess.Handlers)
+//
+//     // create a new service client with our client side metric session
+//     svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 0000000..23bb639
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
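+//
+// A minimal usage sketch (illustrative):
+//
+//	d := defaults.Get()
+//	d.Config.WithRegion("us-west-2") // override a default before use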
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is for applications that need to use some other provider (for example,
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers. This allows that default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
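+//
+// Resolution order (per the checks below): the full-URI container endpoint
+// (AWS_CONTAINER_CREDENTIALS_FULL_URI), then the ECS relative-URI container
+// endpoint, then the EC2 instance role provider.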
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 0000000..ca0ee1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions will be named after the scalar type. So to get
+// a *string from a string value, use the "String" function. This makes it easy
+// to get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//     var strPtr *string
+//
+//     // Without the SDK's conversion functions
+//     str := "my string"
+//     strPtr = &str
+//
+//     // With the SDK's conversion functions
+//     strPtr = aws.String("my string")
+//
+//     // Convert *string to string value
+//     str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//     var strPtrs []*string
+//     var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//     // Convert []string to []*string
+//     strPtrs = aws.StringSlice(strs)
+//
+//     // Convert []*string to []string
+//     strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..69fa63d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,250 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// getToken uses the duration to return a token for the EC2 metadata service,
+// or an error if the request failed.
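+//
+// This is the token flow commonly known as IMDSv2: a PUT request to
+// /latest/api/token carrying the x-aws-ec2-metadata-token-ttl-seconds header.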
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/latest/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. 
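+//
+// For example (illustrative), the instance identity document is retrieved
+// with the path "instance-identity/document", as done by
+// GetInstanceIdentityDocumentWithContext below.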
+func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
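+//
+// A minimal usage sketch (illustrative; assumes an existing session.Session
+// value named sess):
+//
+//	svc := ec2metadata.New(sess)
+//	if !svc.Available() {
+//		// Not on EC2, or IMDS is unreachable.
+//	}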
+func (c *EC2Metadata) Available() bool {
+	return c.AvailableWithContext(aws.BackgroundContext())
+}
+
+// AvailableWithContext returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool {
+	if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes      []string  `json:"devpayProductCodes"`
+	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
+	AvailabilityZone        string    `json:"availabilityZone"`
+	PrivateIP               string    `json:"privateIp"`
+	Version                 string    `json:"version"`
+	Region                  string    `json:"region"`
+	InstanceID              string    `json:"instanceId"`
+	BillingProducts         []string  `json:"billingProducts"`
+	InstanceType            string    `json:"instanceType"`
+	AccountID               string    `json:"accountId"`
+	PendingTime             time.Time `json:"pendingTime"`
+	ImageID                 string    `json:"imageId"`
+	KernelID                string    `json:"kernelId"`
+	RamdiskID               string    `json:"ramdiskId"`
+	Architecture            string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000..df63bad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,245 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable
+// to true instructs the SDK to disable the EC2 Metadata client. The client
+// cannot be used while the environment variable is set to true (case
+// insensitive).
+//
+// The endpoint of the EC2 IMDS client can be configured via the environment
+// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
+package ec2metadata
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+	// ServiceName is the name of the service.
+	ServiceName          = "ec2metadata"
+	disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+	// Headers for Token and TTL
+	ttlHeader   = "x-aws-ec2-metadata-token-ttl-seconds"
+	tokenHeader = "x-aws-ec2-metadata-token"
+
+	// Named Handler constants
+	fetchTokenHandlerName          = "FetchTokenHandler"
+	unmarshalMetadataHandlerName   = "unmarshalMetadataHandler"
+	unmarshalTokenHandlerName      = "unmarshalTokenHandler"
+	enableTokenProviderHandlerName = "enableTokenProviderHandler"
+
+	// TTL constants
+	defaultTTL          = 21600 * time.Second
+	ttlExpirationWindow = 30 * time.Second
+)
+
+// An EC2Metadata is an EC2 Metadata service client.
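+//
+// The zero value is not usable; construct instances with New or NewClient
+// below.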
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//     // Create an EC2Metadata client from just a session.
+//     svc := ec2metadata.New(mySession)
+//
+//     // Create an EC2Metadata client with additional configuration
+//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
+// client is able to communicate with the EC2 IMDS API.
+//
+// If an unmodified HTTP client from the stdlib default, or no client, is
+// provided, the EC2Metadata client's HTTP timeout will be shortened. To
+// disable this override, set Config.EC2MetadataDisableTimeoutOverride to
+// true. The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 1 * time.Second,
+		}
+		// max number of retries on the client operation
+		cfg.MaxRetries = aws.Int(2)
+	}
+
+	if u, err := url.Parse(endpoint); err == nil {
+		// Remove path from the endpoint since it will be added by requests.
+		// This is an artifact of the SDK adding `/latest` to the endpoint for
+		// EC2 IMDS, but this is now moved to the operation definition.
+		u.Path = ""
+		u.RawPath = ""
+		endpoint = u.String()
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				ServiceID:   ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	// token provider instance
+	tp := newTokenProvider(svc, defaultTTL)
+
+	// NamedHandler for fetching token
+	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
+		Name: fetchTokenHandlerName,
+		Fn:   tp.fetchTokenHandler,
+	})
+	// NamedHandler for enabling token provider
+	svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
+		Name: enableTokenProviderHandlerName,
+		Fn:   tp.enableTokenProviderHandler,
+	})
+
+	svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Disable the EC2 Metadata service if the environment variable is set.
+	// This short-circuits the service's functionality to always fail to send
+	// requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. 
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.NewRequestFailure(
+		awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil),
+		r.HTTPResponse.StatusCode, r.RequestID)
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
new file mode 100644
index 0000000..4b29f19
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -0,0 +1,93 @@
+package ec2metadata
+
+import (
+	"net/http"
+	"sync/atomic"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A tokenProvider struct provides access to the EC2Metadata client,
+// an atomic instance of a token, and the configuredTTL for it.
+// tokenProvider also provides an atomic flag to disable the
+// fetch token operation.
+// The disabled member will use 0 as false, and 1 as true.
+type tokenProvider struct {
+	client        *EC2Metadata
+	token         atomic.Value
+	configuredTTL time.Duration
+	disabled      uint32
+}
+
+// An ec2Token struct helps with the use of a token in EC2 Metadata service ops
+type ec2Token struct {
+	token string
+	credentials.Expiry
+}
+
+// newTokenProvider provides a pointer to a tokenProvider instance
+func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
+	return &tokenProvider{client: c, configuredTTL: duration}
+}
+
+// fetchTokenHandler fetches a token for the EC2Metadata service client by default.
+func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
+
+	// short-circuit to the insecure data flow if the tokenProvider is disabled.
+	if v := atomic.LoadUint32(&t.disabled); v == 1 {
+		return
+	}
+
+	if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
+		r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+		return
+	}
+
+	output, err := t.client.getToken(r.Context(), t.configuredTTL)
+
+	if err != nil {
+
+		// change the disabled flag on the token provider to true
+		// when the error is a request timeout error.
+		if requestFailureError, ok := err.(awserr.RequestFailure); ok {
+			switch requestFailureError.StatusCode() {
+			case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
+				atomic.StoreUint32(&t.disabled, 1)
+			case http.StatusBadRequest:
+				r.Error = requestFailureError
+			}
+
+			// Check if request timed out while waiting for response
+			if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
+				if e.Code() == request.ErrCodeRequestError {
+					atomic.StoreUint32(&t.disabled, 1)
+				}
+			}
+		}
+		return
+	}
+
+	newToken := ec2Token{
+		token: output.Token,
+	}
+	newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
+	t.token.Store(newToken)
+
+	// Inject the token header into the request.
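+	// Re-loading from the atomic value, rather than using newToken directly,
+	// keeps this path identical to the cached-token path above.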
+	if ec2Token, ok := t.token.Load().(ec2Token); ok {
+		r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+	}
+}
+
+// enableTokenProviderHandler enables the token provider
+func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
+	// If the response status code is 401 (Unauthorized), re-enable the token provider
+	if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
+		e.StatusCode() == http.StatusUnauthorized {
+		t.token.Store(ec2Token{})
+		atomic.StoreUint32(&t.disabled, 0)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000..8d65ca1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,193 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+	SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+	for _, fn := range optFns {
+		fn(d)
+	}
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+//     resolver, err := endpoints.DecodeModel(reader)
+//
+//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//     for _, p := range partitions {
+//         // ... inspect partitions
+//     }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+	var opts DecodeModelOptions
+	opts.Set(optFns...)
+
+	// Get the version of the partition file to determine what
+	// unmarshaling model to use.
+	modelDef := modelDefinition{}
+	if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+		return nil, newDecodeModelError("failed to decode endpoints model", err)
+	}
+
+	var version string
+	if b, ok := modelDef["version"]; ok {
+		version = string(b)
+	} else {
+		return nil, newDecodeModelError("endpoints version not found in model", nil)
+	}
+
+	if version == "3" {
+		return decodeV3Endpoints(modelDef, opts)
+	}
+
+	return nil, newDecodeModelError(
+		fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+	b, ok := modelDef["partitions"]
+	if !ok {
+		return nil, newDecodeModelError("endpoints model missing partitions", nil)
+	}
+
+	ps := partitions{}
+	if err := json.Unmarshal(b, &ps); err != nil {
+		return nil, newDecodeModelError("failed to decode endpoints model", err)
+	}
+
+	if opts.SkipCustomizations {
+		return ps, nil
+	}
+
+	// Customization
+	for i := 0; i < len(ps); i++ {
+		p := &ps[i]
+		custRegionalS3(p)
+		custRmIotDataService(p)
+		custFixAppAutoscalingChina(p)
+		custFixAppAutoscalingUsGov(p)
+	}
+
+	return ps, nil
+}
+
+func custRegionalS3(p *partition) {
+	if p.ID != "aws" {
+		return
+	}
+
+	service, ok := p.Services["s3"]
+	if !ok {
+		return
+	}
+
+	const awsGlobal = "aws-global"
+	const usEast1 = "us-east-1"
+
+	// If the global endpoint already exists, no customization is needed.
+ if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { + return + } + + service.PartitionEndpoint = awsGlobal + if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { + service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} + } + service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: usEast1, + }, + } + + p.Services["s3"] = service +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + serviceDefault := s.Defaults[defaultKey{}] + if e, a := expectHostname, serviceDefault.Hostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + serviceDefault.Hostname = expectHostname + ".cn" + s.Defaults[defaultKey{}] = serviceDefault + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + serviceDefault := s.Defaults[defaultKey{}] + if a := serviceDefault.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := serviceDefault.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + serviceDefault.CredentialScope.Service = "application-autoscaling" + serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" + + if s.Defaults == nil { + s.Defaults = make(endpointDefaults) + } + + s.Defaults[defaultKey{}] = serviceDefault + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000..a8a0ab7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,32415 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). 
+	ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
+	CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+	EuCentral1RegionID   = "eu-central-1"   // Europe (Frankfurt).
+	EuNorth1RegionID     = "eu-north-1"     // Europe (Stockholm).
+	EuSouth1RegionID     = "eu-south-1"     // Europe (Milan).
+	EuWest1RegionID      = "eu-west-1"      // Europe (Ireland).
+	EuWest2RegionID      = "eu-west-2"      // Europe (London).
+	EuWest3RegionID      = "eu-west-3"      // Europe (Paris).
+	MeSouth1RegionID     = "me-south-1"     // Middle East (Bahrain).
+	SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
+	UsEast1RegionID      = "us-east-1"      // US East (N. Virginia).
+	UsEast2RegionID      = "us-east-2"      // US East (Ohio).
+	UsWest1RegionID      = "us-west-1"      // US West (N. California).
+	UsWest2RegionID      = "us-west-2"      // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+	CnNorth1RegionID     = "cn-north-1"     // China (Beijing).
+	CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+	UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+	UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West).
+)
+
+// AWS ISO (US) partition's regions.
+const (
+	UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
+	UsIsoWest1RegionID = "us-iso-west-1" // US ISO West.
+)
+
+// AWS ISOB (US) partition's regions.
+const (
+	UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+	return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+//     partitions := endpoints.DefaultPartitions()
+//     for _, p := range partitions {
+//         // ... inspect partitions
+//     }
+func DefaultPartitions() []Partition {
+	return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+	awsPartition,
+	awscnPartition,
+	awsusgovPartition,
+	awsisoPartition,
+	awsisobPartition,
+}
+
+// AwsPartition returns the Partition for AWS Standard.
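+//
+// A minimal usage sketch (illustrative):
+//
+//	p := endpoints.AwsPartition()
+//	fmt.Println(p.ID()) // "aws"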
+func AwsPartition() Partition {
+	return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+	ID:        "aws",
+	Name:      "AWS Standard",
+	DNSSuffix: "amazonaws.com",
+	RegionRegex: regionRegex{
+		Regexp: func() *regexp.Regexp {
+			reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$")
+			return reg
+		}(),
+	},
+	Defaults: endpointDefaults{
+		defaultKey{}: endpoint{
+			Hostname:          "{service}.{region}.{dnsSuffix}",
+			Protocols:         []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: dualStackVariant,
+		}: endpoint{
+			Hostname:          "{service}.{region}.{dnsSuffix}",
+			DNSSuffix:         "api.aws",
+			Protocols:         []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: fipsVariant,
+		}: endpoint{
+			Hostname:          "{service}-fips.{region}.{dnsSuffix}",
+			DNSSuffix:         "amazonaws.com",
+			Protocols:         []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: fipsVariant | dualStackVariant,
+		}: endpoint{
+			Hostname:          "{service}-fips.{region}.{dnsSuffix}",
+			DNSSuffix:         "api.aws",
+			Protocols:         []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+	},
+	Regions: regions{
+		"af-south-1": region{
+			Description: "Africa (Cape Town)",
+		},
+		"ap-east-1": region{
+			Description: "Asia Pacific (Hong Kong)",
+		},
+		"ap-northeast-1": region{
+			Description: "Asia Pacific (Tokyo)",
+		},
+		"ap-northeast-2": region{
+			Description: "Asia Pacific (Seoul)",
+		},
+		"ap-northeast-3": region{
+			Description: "Asia Pacific (Osaka)",
+		},
+		"ap-south-1": region{
+			Description: "Asia Pacific (Mumbai)",
+		},
+		"ap-southeast-1": region{
+			Description: "Asia Pacific (Singapore)",
+		},
+		"ap-southeast-2": region{
+			Description: "Asia Pacific (Sydney)",
+		},
+		"ap-southeast-3": region{
+			Description: "Asia Pacific (Jakarta)",
+		},
+		"ca-central-1": region{
+			Description: "Canada (Central)",
+		},
+		"eu-central-1": region{
+			Description: "Europe (Frankfurt)",
+		},
+		"eu-north-1": region{
+			Description: "Europe (Stockholm)",
+		},
+		"eu-south-1": region{
+			Description: "Europe (Milan)",
+		},
+		"eu-west-1": region{
+			Description: "Europe (Ireland)",
+		},
+		"eu-west-2": region{
+			Description: "Europe (London)",
+		},
+		"eu-west-3": region{
+			Description: "Europe (Paris)",
+		},
+		"me-south-1": region{
+			Description: "Middle East (Bahrain)",
+		},
+		"sa-east-1": region{
+			Description: "South America (Sao Paulo)",
+		},
+		"us-east-1": region{
+			Description: "US East (N. Virginia)",
+		},
+		"us-east-2": region{
+			Description: "US East (Ohio)",
+		},
+		"us-west-1": region{
+			Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "account": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "account.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + }, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplify": service{ 
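+			// Note (illustrative, not part of the generated model): an entry
+			// whose value is the empty endpoint{} inherits the partition
+			// defaults, so the us-east-1 entry below resolves through the
+			// "{service}.{region}.{dnsSuffix}" template to
+			// "amplify.us-east-1.amazonaws.com".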
+ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifybackend": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifyuibuilder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: 
"api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "dkr-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + 
}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.fleethub.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + 
}, + }, + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "app-integrations": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, 
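+			// Note (illustrative, not part of the generated model): this is
+			// the service adjusted by custFixAppAutoscalingChina and
+			// custFixAppAutoscalingUsGov in v3model.go; those customizations
+			// apply only to the aws-cn and aws-us-gov partitions, so the
+			// entries here in the aws partition are left unchanged.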
+ endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, + }, + }, + "apprunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + }, + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aps": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + }, + }, + }, + "auditmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + 
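Note: autoscaling and autoscaling-plans declare Protocols: []string{"http", "https"} in their service defaults. The resolver prefers https, but the http entry is what lets a caller opt out of TLS. A short sketch, assuming the DisableSSLOption helper from the same endpoints package; the printed URLs are illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        r := endpoints.DefaultResolver()

        // Default resolution picks the https scheme.
        secure, _ := r.EndpointFor("autoscaling", "eu-west-1")
        fmt.Println(secure.URL) // https://autoscaling.eu-west-1.amazonaws.com

        // With SSL disabled, the resolver falls back to the http scheme
        // permitted by the service's Protocols default.
        plain, _ := r.EndpointFor("autoscaling", "eu-west-1", endpoints.DisableSSLOption)
        fmt.Println(plain.URL) // http://autoscaling.eu-west-1.amazonaws.com
    }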
endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + 
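Note: unlike athena above, batch defines its FIPS hostnames once as a template, "fips.batch.{region}.{dnsSuffix}", in the service defaults; the per-region fipsVariant entries that follow simply pin the already-expanded hostnames. A self-contained sketch of the substitution these templates imply (expandHostname is a hypothetical stand-in for the SDK's internal expansion, not a real API):

    package main

    import (
        "fmt"
        "strings"
    )

    // expandHostname mimics how an endpoint hostname template such as
    // "fips.batch.{region}.{dnsSuffix}" is turned into a concrete host.
    func expandHostname(template, service, region, dnsSuffix string) string {
        return strings.NewReplacer(
            "{service}", service,
            "{region}", region,
            "{dnsSuffix}", dnsSuffix,
        ).Replace(template)
    }

    func main() {
        fmt.Println(expandHostname(
            "fips.batch.{region}.{dnsSuffix}", "batch", "us-west-2", "amazonaws.com",
        )) // fips.batch.us-west-2.amazonaws.com
    }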
endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + }, + }, + }, + "billingconductor": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "billingconductor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "braket": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "catalog.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "chime.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", + }, + }, + }, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudsearch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + }, + }, + }, + "codeartifact": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codeguru-reviewer": service{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + }, + }, + }, + "codestar": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: 
"us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + }, + }, + }, + "cognito-sync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + }, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: 
"compute-optimizer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "compute-optimizer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "compute-optimizer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + }, + }, + }, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "connect-campaigns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "contact-lens": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: 
"iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ 
+ Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dataexchange": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devicefarm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "discovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: 
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "local",
+				}: endpoint{
+					Hostname:  "localhost:8000",
+					Protocols: []string{"http"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"ebs": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ebs-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ebs-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ebs-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ebs-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"ec2": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.eu-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ec2-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ec2-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ec2-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ec2-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.us-west-2.api.aws",
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"ecs": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ecs-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ecs-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ecs-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ecs-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"edge.sagemaker": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"eks": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:  "fips.eks.{region}.{dnsSuffix}",
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "fips.eks.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "fips.eks.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "fips.eks.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "fips.eks.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.eks.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.eks.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.eks.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.eks.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"elasticache": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips",
+				}: endpoint{
+					Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticache-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "elasticache-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticache-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "elasticache-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticache-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "elasticache-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"elasticbeanstalk": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"elasticfilesystem": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "af-south-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-3",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-3",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-north-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-south-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-3",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "fips-af-south-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "af-south-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-east-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-2",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-3",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-3",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-south-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-southeast-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-southeast-2",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-southeast-3",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-3",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-central-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-north-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-south-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-south-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-2",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-3",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-me-south-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "me-south-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-sa-east-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"elasticloadbalancing": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"elasticmapreduce": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					SSLCommonName: "{region}.{service}.{dnsSuffix}",
+					Protocols:     []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					SSLCommonName: "{service}.{region}.{dnsSuffix}",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					SSLCommonName: "{service}.{region}.{dnsSuffix}",
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:      "elasticmapreduce-fips.us-east-1.amazonaws.com",
+					SSLCommonName: "{service}.{region}.{dnsSuffix}",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"elastictranscoder": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"email": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "email-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "email-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "email-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "email-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"emr-containers": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"emr-serverless": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+
}: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + }, + }, + }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "evidently.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: 
"evidently.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", + }, + }, + }, + "finspace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "finspace-api": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"firehose-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecast": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecastquery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"forecastquery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + }, + }, + }, + "frauddetector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "prod-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + }, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "gamesparks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-west-2", + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + }, + }, + }, + "grafana": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "grafana.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "grafana.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "grafana.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "grafana.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "grafana.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "grafana.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "grafana.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "grafana.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "grafana.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "grafana.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "groundstation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "healthlake": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "honeycode": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identity-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: 
"us-west-2", + }: endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + 
endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ivs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafkaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + 
}, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "af-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: 
"eu-north-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: 
"kms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lookoutequipment": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "lookoutvision": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "machinelearning": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "macie": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + }, + }, + }, + "macie2": service{ 
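// Each FIPS endpoint in services such as "macie" above is modeled twice:
// under a legacy pseudo-region key ("fips-us-east-1", tagged
// Deprecated: boxedTrue) and as a fipsVariant on the real region key.
// A minimal sketch of a lookup keyed the second way, which reports
// failure rather than silently downgrading off FIPS — stand-in types,
// not the SDK's actual resolver:
package main

import "fmt"

type epKey struct {
	Region string
	FIPS   bool // stand-in for Variant & fipsVariant
}

var macie2Hosts = map[epKey]string{
	{Region: "us-east-1"}:             "macie2.us-east-1.amazonaws.com",
	{Region: "us-east-1", FIPS: true}: "macie2-fips.us-east-1.amazonaws.com",
}

// lookup resolves a hostname; a FIPS request matches only an entry
// modeled with the FIPS flag set.
func lookup(region string, wantFIPS bool) (string, bool) {
	host, ok := macie2Hosts[epKey{Region: region, FIPS: wantFIPS}]
	return host, ok
}

func main() {
	if host, ok := lookup("us-east-1", true); ok {
		fmt.Println(host) // macie2-fips.us-east-1.amazonaws.com
	}
}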
+ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "mediaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + }, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-west-2", + }: endpoint{}, + }, + }, + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + 
}: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mobileanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "sandbox", + }: endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + 
Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "networkmanager.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "nimble": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: 
"oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "opsworks-cm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + 
}, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "profile": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "projects.iot1click": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "proton": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + 
}, + "qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: 
endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "rds-fips.ca-central-1", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-1", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-2", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-1", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-2", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + 
}, + endpointKey{ + Region: "rds.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: 
endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", 
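+ // NOTE (editorial, illustrative; not emitted by the endpoints code generator):
+ // entries keyed with Variant: fipsVariant, such as the rekognition us-east-1
+ // entry just below, are only selected when the caller opts in to FIPS
+ // endpoints. Assuming the aws-sdk-go v1 endpoints API, a resolution sketch
+ // would be:
+ //
+ //   ep, err := endpoints.DefaultResolver().EndpointFor(
+ //       "rekognition", "us-east-1",
+ //       func(o *endpoints.Options) {
+ //           o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+ //       },
+ //   )
+ //
+ // which should resolve to rekognition-fips.us-east-1.amazonaws.com per the
+ // fipsVariant entry in this table.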
+ }: endpoint{}, + endpointKey{ + Region: "rekognition-fips.ca-central-1", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-1", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-2", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-2", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", 
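+ // NOTE (editorial, illustrative; not emitted by the endpoints code generator):
+ // pseudo-region keys such as "fips-us-east-1" are legacy aliases kept only
+ // for backward compatibility, which is why they carry Deprecated: boxedTrue
+ // below; the canonical lookup is the real region combined with the
+ // fipsVariant key. Assuming the v1 endpoints API exposes a LogDeprecated
+ // option, resolving the legacy alias might look like:
+ //
+ //   ep, _ := endpoints.DefaultResolver().EndpointFor(
+ //       "resource-groups", "fips-us-east-1",
+ //       func(o *endpoints.Options) { o.LogDeprecated = true },
+ //   )
+ //
+ // which should still return resource-groups-fips.us-east-1.amazonaws.com.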
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"robomaker": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"rolesanywhere": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"route53": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized:    boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "route53.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region:  "aws-global",
+					Variant: fipsVariant,
+				}: endpoint{
Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53-recovery-control-config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "route53domains": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + 
+					CredentialScope: credentialScope{
+						Service: "lex",
+					},
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.lex.{region}.{dnsSuffix}",
+					CredentialScope: credentialScope{
+						Service: "lex",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"runtime.sagemaker": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
"us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + 
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname:          "s3.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-north-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname:          "s3.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-west-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "s3-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "s3-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "s3-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.me-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "s3-external-1",
+				}: endpoint{
+					Hostname: "s3-external-1.amazonaws.com",
[]string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: 
"amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: 
"ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + 
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname:          "s3-control.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Hostname:          "s3-control.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.dualstack.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname:          "s3-control.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname:          "s3-control.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname:          "s3-control-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"s3-outposts": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+			},
+		},
+		"savingsplans": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized:    boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "savingsplans.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"schemas": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"sdb": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols:         []string{"http", "https"},
+					SignatureVersions: []string{"v2"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "sdb.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"secretsmanager": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"securityhub": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"serverlessrepo": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+		},
+		"servicecatalog": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"servicecatalog-appregistry": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"servicediscovery": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "servicediscovery",
+				}: endpoint{
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region:  "servicediscovery",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "servicediscovery-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"servicequotas": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"session.qldb": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"snowball-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-incidents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + }, + 
endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: 
"storagegateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + }, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"transcribestreaming-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-ca-central-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-west-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: 
"waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "wafv2.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "wafv2.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "wafv2.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "wafv2.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "wafv2.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "wafv2.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "wafv2.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "wafv2.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "wafv2.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "wafv2.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "wafv2.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "wafv2.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "wafv2.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: 
"wafv2.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "wafv2.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "wafv2.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "wafv2.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "wafv2.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "wafv2.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "wafv2.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + 
Hostname: "wafv2.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "wisdom": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "workdocs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workmail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + }, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "account": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "account.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: 
endpoint{}, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "batch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + 
"elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "health": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Defaults: 
endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "rds.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "securityhub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: 
endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + }, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: 
endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-fips-secondary", + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + }, + }, + }, 
+ "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"codedeploy-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "config": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + }, + }, + }, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + }, + }, + }, + "elasticache": service{ + Defaults: endpointDefaults{ + defaultKey{}: 
endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + }, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-gov-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dataplane-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "dataplane-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", 
+ }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + }, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ 
+ Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + }, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: 
endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + 
}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rds": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + 
Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rekognition-fips.us-gov-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resource-groups": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + 
"route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "securityhub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: 
"servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "servicediscovery", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sts": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + 
Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). +func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + "us-iso-west-1": region{ + Description: "US ISO WEST", + }, + }, + Services: services{ + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + 
}: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: 
"us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "sns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + 
Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ds": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + 
"lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + }, +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 0000000..ca8fc82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. 
+ GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. 
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 0000000..84316b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions
+// interface will allow you to get access to the list of underlying Partitions
+// with the Partitions method. This is helpful if you want to limit the SDK's
+// endpoint resolving to a single partition, or enumerate regions, services,
+// and endpoints in the partition.
+//
+//	resolver := endpoints.DefaultResolver()
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//	for _, p := range partitions {
+//		fmt.Println("Regions for", p.ID())
+//		for id := range p.Regions() {
+//			fmt.Println("*", id)
+//		}
+//
+//		fmt.Println("Services for", p.ID())
+//		for id := range p.Services() {
+//			fmt.Println("*", id)
+//		}
+//	}
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface, it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//		if service == endpoints.S3ServiceID {
+//			return endpoints.ResolvedEndpoint{
+//				URL:           "s3.custom.endpoint.com",
+//				SigningRegion: "custom-signing-region",
+//			}, nil
+//		}
+//
+//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//	}
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		Region:           aws.String("us-west-2"),
+//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//	}))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000..8809861
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,706 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to.
+type Logger interface {
+	Log(...interface{})
+}
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
+// behavior.
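+//
+// For example, dual-stack resolution can be requested through Options (a
+// sketch using the Set and UseDualStackEndpointOption helpers defined later
+// in this file):
+//
+//	var o endpoints.Options
+//	o.Set(endpoints.UseDualStackEndpointOption)
+//	// o.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled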
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
+	// resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// Options provides the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+	DisableSSL bool
+
+	// Sets the resolver to resolve the endpoint as a dualstack endpoint
+	// for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled, a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled, only services that are known to support dualstack will return
+	// dualstack endpoints.
+	//
+	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
+	UseDualStack bool
+
+	// Sets the resolver to resolve a dual-stack endpoint for the service.
+	UseDualStackEndpoint DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint FIPSEndpointState
+
+	// Enables strict matching of services and regions when resolving endpoints.
+	// If the partition doesn't enumerate the exact service and region, an
+	// error will be returned. This option will prevent returning endpoints
+	// that look valid, but may not resolve to any real endpoint.
+	StrictMatching bool
+
+	// Enables resolving a service endpoint based on the region provided if the
+	// service does not exist. The service endpoint ID will be used as the service
+	// domain name prefix. By default, the endpoint resolver requires the service
+	// to be known when resolving endpoints.
+	//
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to use with the
+	// service endpoint ID. If both the service and region are unknown when resolving
+	// the endpoint on the partition list, an UnknownEndpointError will be returned.
+	//
+	// If resolving an endpoint on a partition-specific resolver, that partition's
+	// domain name pattern will be used with the service endpoint ID. If both
+	// region and service do not exist when resolving an endpoint on a specific
+	// partition, the partition's domain pattern will be used to combine the
+	// endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
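+	//
+	// See ResolveUnknownServiceOption later in this file for a functional
+	// option that enables this behavior.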
+	ResolveUnknownService bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6).
+	EC2MetadataEndpointMode EC2IMDSEndpointModeState
+
+	// STS Regional Endpoint flag helps with resolving the STS endpoint.
+	STSRegionalEndpoint STSRegionalEndpoint
+
+	// S3 Regional Endpoint flag helps with resolving the S3 endpoint.
+	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length),
+	// it takes priority over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Logger is the logger that will be used to log messages.
+	Logger Logger
+
+	// Determines whether logging of deprecated endpoints usage is enabled.
+	LogDeprecated bool
+}
+
+func (o Options) getEndpointVariant(service string) (v endpointVariant) {
+	const s3 = "s3"
+	const s3Control = "s3-control"
+
+	if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
+		((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
+		v |= dualStackVariant
+	}
+	if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
+		v |= fipsVariant
+	}
+	return v
+}
+
+// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
+type EC2IMDSEndpointModeState uint
+
+// Enumeration values for EC2IMDSEndpointModeState
+const (
+	EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota
+	EC2IMDSEndpointModeStateIPv4
+	EC2IMDSEndpointModeStateIPv6
+)
+
+// SetFromString sets the EC2IMDSEndpointModeState based on the provided string
+// value. An empty value resets the state to EC2IMDSEndpointModeStateUnset;
+// any other unrecognized value returns an error.
+func (e *EC2IMDSEndpointModeState) SetFromString(v string) error {
+	v = strings.TrimSpace(v)
+
+	switch {
+	case len(v) == 0:
+		*e = EC2IMDSEndpointModeStateUnset
+	case strings.EqualFold(v, "IPv6"):
+		*e = EC2IMDSEndpointModeStateIPv6
+	case strings.EqualFold(v, "IPv4"):
+		*e = EC2IMDSEndpointModeStateIPv4
+	default:
+		return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+	}
+	return nil
+}
+
+// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
+// options.
+type STSRegionalEndpoint int
+
+func (e STSRegionalEndpoint) String() string {
+	switch e {
+	case LegacySTSEndpoint:
+		return "legacy"
+	case RegionalSTSEndpoint:
+		return "regional"
+	case UnsetSTSEndpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
+	UnsetSTSEndpoint STSRegionalEndpoint = iota
+
+	// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use legacy endpoints.
+	LegacySTSEndpoint
+
+	// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use regional endpoints.
+	RegionalSTSEndpoint
+)
+
+// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy`, `regional` are the only case-insensitive valid strings for
+// resolving the STS Regional Endpoint flag.
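+//
+// A minimal usage sketch (the literal input below is illustrative; it would
+// normally come from the user's env or shared config):
+//
+//	v, err := endpoints.GetSTSRegionalEndpoint("regional")
+//	// v == endpoints.RegionalSTSEndpoint, err == nil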
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacySTSEndpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalSTSEndpoint, nil
+	default:
+		return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
+	}
+}
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+	switch e {
+	case LegacyS3UsEast1Endpoint:
+		return "legacy"
+	case RegionalS3UsEast1Endpoint:
+		return "regional"
+	case UnsetS3UsEast1Endpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
+	// specified.
+	UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+	// LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use legacy endpoints.
+	LegacyS3UsEast1Endpoint
+
+	// RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use regional endpoints.
+	RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy`, `regional` are the only case-insensitive valid strings for
+// resolving the S3 Regional Endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacyS3UsEast1Endpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalS3UsEast1Endpoint, nil
+	default:
+		return UnsetS3UsEast1Endpoint,
+			fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+	}
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+	for _, fn := range optFns {
+		fn(o)
+	}
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+	o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+//
+// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
+// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option.
+func UseDualStackOption(o *Options) {
+	o.UseDualStack = true
+}
+
+// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackEndpointOption(o *Options) {
+	o.UseDualStackEndpoint = DualStackEndpointStateEnabled
+}
+
+// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseFIPSEndpointOption(o *Options) {
+	o.UseFIPSEndpoint = FIPSEndpointStateEnabled
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+	o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
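+//
+// An illustrative sketch ("myservice" is a hypothetical, unmodeled service
+// ID; the constructed endpoint is not guaranteed to exist):
+//
+//	ep, err := endpoints.AwsPartition().EndpointFor(
+//		"myservice", "us-west-2", endpoints.ResolveUnknownServiceOption)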
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
+// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
+// STS endpoints to their regional endpoint, instead of the global endpoint.
+func STSRegionalEndpointOption(o *Options) {
+	o.STSRegionalEndpoint = RegionalSTSEndpoint
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and the DefaultResolver return value satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be set instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//
+//	rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//
+//	rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
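+//
+// A usage sketch with the SDK's default partitions:
+//
+//	p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "us-iso-east-1")
+//	if ok {
+//		fmt.Println(p.ID()) // "aws-iso"
+//	}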
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id, dnsSuffix string
+	p             *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata, the UnknownServiceError
+// error will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled,
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved, an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled, the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned:
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+	rs := make(map[string]Region, len(p.p.Regions))
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := make(map[string]Service, len(p.p.Services))
+
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	// Since we have removed the customization that injected this into the model,
+	// we still need to pretend that this is a modeled service.
+	if _, ok := ss[Ec2metadataServiceID]; !ok {
+		ss[Ec2metadataServiceID] = Service{
+			id: Ec2metadataServiceID,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text; it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region, given
+// a service.
+// See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service, given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+
+	service, ok := s.p.Services[s.id]
+
+	// Since the ec2metadata customization has been removed, we need to check
+	// if it was defined in a non-standard endpoints.json file. If it was not,
+	// we can return the empty map, as there are no regional endpoints for IMDS.
+	// Otherwise, we need to iterate the non-standard model.
+	if s.id == Ec2metadataServiceID && !ok {
+		return rs
+	}
+
+	for id := range service.Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		if r, ok := s.p.Regions[id.Region]; ok {
+			rs[id.Region] = Region{
+				id:   id.Region,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		es[id.Region] = Endpoint{
+			id:        id.Region,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
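+//
+// For example (a sketch; the values follow the aws-iso metadata defined
+// earlier in this package):
+//
+//	ep, _ := endpoints.AwsIsoPartition().EndpointFor("s3", "us-iso-east-1")
+//	// ep.URL == "https://s3.us-iso-east-1.c2s.ic.gov"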
+type ResolvedEndpoint struct {
+	// The endpoint URL.
+	URL string
+
+	// The endpoint partition.
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when, in StrictMatching mode, the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q",
+		e.Partition, e.Service)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+	return e.Error()
+}
+
+// An UnknownEndpointError is returned when, in StrictMatching mode, the
+// service is valid but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+	Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+	return UnknownEndpointError{
+		awsError: awserr.New("UnknownEndpointError",
+			"could not resolve endpoint", nil),
+		Partition: p,
+		Service:   s,
+		Region:    r,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+		e.Partition, e.Service, e.Region)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+	return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 0000000..df75e89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,24 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+	"sts": {
+		"ap-northeast-1": {},
+		"ap-south-1":     {},
+		"ap-southeast-1": {},
+		"ap-southeast-2": {},
+		"ca-central-1":   {},
+		"eu-central-1":   {},
+		"eu-north-1":     {},
+		"eu-west-1":      {},
+		"eu-west-2":      {},
+		"eu-west-3":      {},
+		"sa-east-1":      {},
+		"us-east-1":      {},
+		"us-east-2":      {},
+		"us-west-1":      {},
+		"us-west-2":      {},
+	},
+	"s3": {
+		"us-east-1": {},
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000..89f6627
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,594 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const (
+	ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
+	ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
+)
+
+const dnsSuffixTemplateKey = "{dnsSuffix}"
+
+// defaultKey is a compound map key of a variant and other values.
+type defaultKey struct {
+	Variant        endpointVariant
+	ServiceVariant serviceVariant
+}
+
+// endpointKey is a compound map key of a region and associated variant value.
+type endpointKey struct {
+	Region  string
+	Variant endpointVariant
+}
+
+// endpointVariant is a bit field to describe the endpoints attributes.
+type endpointVariant uint64
+
+// serviceVariant is a bit field to describe the service endpoint attributes.
+type serviceVariant uint64
+
+const (
+	// fipsVariant indicates that the endpoint is FIPS capable.
+	fipsVariant endpointVariant = 1 << (64 - 1 - iota)
+
+	// dualStackVariant indicates that the endpoint is DualStack capable.
+	dualStackVariant
+)
+
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	var opt Options
+	opt.Set(opts...)
+
+	if len(opt.ResolvedRegion) > 0 {
+		region = opt.ResolvedRegion
+	}
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(service, region, opt) {
+			continue
+		}
+
+		return ps[i].EndpointFor(service, region, opts...)
+	}
+
+	// If not strictly matching, fall back to the first partition's format
+	// to use when resolving the endpoint.
+	if !opt.StrictMatching && len(ps) > 0 {
+		return ps[0].EndpointFor(service, region, opts...)
+	}
+
+	return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition in the SDK's endpoints model.
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type endpointWithVariants struct { + endpoint + Variants []endpointWithTags `json:"variants"` +} + +type endpointWithTags struct { + endpoint + Tags []string `json:"tags"` +} + +type endpointDefaults map[defaultKey]endpoint + +func (p *endpointDefaults) UnmarshalJSON(data []byte) error { + if *p == nil { + *p = make(endpointDefaults) + } + + var e endpointWithVariants + if err := json.Unmarshal(data, &e); err != nil { + return err + } + + (*p)[defaultKey{Variant: 0}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*p)[defaultKey{Variant: endpointVariant}] = ve + } + + return nil +} + +func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { + if len(tags) == 0 { + unknown = true + return + } + + for _, tag := range tags { + switch { + case strings.EqualFold("fips", tag): + ev |= fipsVariant + case strings.EqualFold("dualstack", tag): + ev |= dualStackVariant + default: + unknown = true + } + } + return ev, unknown +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpointDefaults `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, options Options) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[endpointKey{ + Region: region, + Variant: options.getEndpointVariant(service), + }] + + if hasEndpoint && hasService { + return true + } + + if options.StrictMatching { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + + s, hasService := p.Services[service] + + if service == Ec2metadataServiceID && !hasService { + endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) + return endpoint, nil + } + + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. 
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if r, ok := isLegacyGlobalRegion(service, region, opt); ok { + region = r + } + + variant := opt.getEndpointVariant(service) + + endpoints := s.Endpoints + + serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] + // If we searched for a variant which may have no explicit service defaults, + // then we need to inherit the standard service defaults except the hostname and dnsSuffix + if variant != 0 && !hasServiceDefault { + serviceDefaults = s.Defaults[defaultKey{}] + serviceDefaults.Hostname = "" + serviceDefaults.DNSSuffix = "" + } + + partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] + + var dnsSuffix string + if len(serviceDefaults.DNSSuffix) > 0 { + dnsSuffix = serviceDefaults.DNSSuffix + } else if variant == 0 { + // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for + // a non-variant endpoint then we need to set the dnsSuffix. + dnsSuffix = p.DNSSuffix + } + + noDefaults := !hasServiceDefault && !hasPartitionDefault + + e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) + if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) + } + + defs := []endpoint{partitionDefaults, serviceDefaults} + + return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) +} + +func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { + switch mode { + case EC2IMDSEndpointModeStateIPv6: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv6, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + case EC2IMDSEndpointModeStateIPv4: + fallthrough + default: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv4, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + } +} + +func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { + if opt.getEndpointVariant(service) != 0 { + return "", false + } + + const ( + sts = "sts" + s3 = "s3" + awsGlobal = "aws-global" + ) + + switch { + case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: + return region, false + case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: + return region, false + default: + if _, ok := legacyGlobalRegions[service][region]; ok { + return awsGlobal, true + } + } + + return region, false +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es serviceEndpoints, variant endpointVariant) []string { + list := make([]string, 0, len(es)) + for k := range es { + if k.Variant != variant { + continue + } + list = append(list, k.Region) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = 
+	regexp.Compile(regex)
+	if err != nil {
+		return fmt.Errorf("unable to unmarshal region regex, %v", err)
+	}
+	return nil
+}
+
+type regions map[string]region
+
+type region struct {
+	Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+	PartitionEndpoint string           `json:"partitionEndpoint"`
+	IsRegionalized    boxedBool        `json:"isRegionalized,omitempty"`
+	Defaults          endpointDefaults `json:"defaults"`
+	Endpoints         serviceEndpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) {
+	if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok {
+		return e, true
+	}
+
+	if s.IsRegionalized == boxedFalse {
+		return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint
+	}
+
+	// Unable to find any matching endpoint, return
+	// blank that will be used for generic endpoint creation.
+	return endpoint{}, false
+}
+
+type serviceEndpoints map[endpointKey]endpoint
+
+func (s *serviceEndpoints) UnmarshalJSON(data []byte) error {
+	if *s == nil {
+		*s = make(serviceEndpoints)
+	}
+
+	var regionToEndpoint map[string]endpointWithVariants
+
+	if err := json.Unmarshal(data, &regionToEndpoint); err != nil {
+		return err
+	}
+
+	for region, e := range regionToEndpoint {
+		(*s)[endpointKey{Region: region}] = e.endpoint
+
+		e.Hostname = ""
+		e.DNSSuffix = ""
+
+		for _, variant := range e.Variants {
+			endpointVariant, unknown := parseVariantTags(variant.Tags)
+			if unknown {
+				continue
+			}
+
+			var ve endpoint
+			ve.mergeIn(e.endpoint)
+			ve.mergeIn(variant.endpoint)
+
+			(*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve
+		}
+	}
+
+	return nil
+}
+
+type endpoint struct {
+	Hostname        string          `json:"hostname"`
+	Protocols       []string        `json:"protocols"`
+	CredentialScope credentialScope `json:"credentialScope"`
+
+	DNSSuffix string `json:"dnsSuffix"`
+
+	// Signature Version not used
+	SignatureVersions []string `json:"signatureVersions"`
+
+	// SSLCommonName not used.
+	SSLCommonName string `json:"sslCommonName"`
+
+	Deprecated boxedBool `json:"deprecated"`
+}
+
+// isZero returns whether the endpoint structure is an empty (zero) value.
+func (e endpoint) isZero() bool { + switch { + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (credentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + case len(e.SSLCommonName) != 0: + return false + } + return true +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + if len(merged.DNSSuffix) > 0 { + dnsSuffix = merged.DNSSuffix + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { + opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) + } + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if len(other.DNSSuffix) > 0 { + e.DNSSuffix = other.DNSSuffix + } + if other.Deprecated != boxedBoolUnset { + e.Deprecated = other.Deprecated + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region 
string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000..84922bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,412 @@ +//go:build codegen +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + 
set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +func endpointVariantSetter(variant endpointVariant) (string, error) { + if variant == 0 { + return "0", nil + } + + if variant > (fipsVariant | dualStackVariant) { + return "", fmt.Errorf("unknown endpoint variant") + } + + var symbols []string + if variant&fipsVariant != 0 { + symbols = append(symbols, "fipsVariant") + } + if variant&dualStackVariant != 0 { + symbols = append(symbols, "dualStackVariant") + } + v := strings.Join(symbols, "|") + + return v, nil +} + +func endpointKeySetter(e endpointKey) (string, error) { + var sb strings.Builder + sb.WriteString("endpointKey{\n") + sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +func defaultKeySetter(e defaultKey) (string, error) { + var sb strings.Builder + sb.WriteString("defaultKey{\n") + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, + "EndpointVariantSetter": endpointVariantSetter, + "EndpointKeySetter": endpointKeySetter, + "DefaultKeySetter": defaultKeySetter, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. 
The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Defaults" -}} +endpointDefaults{ + {{ range $id, $endpoint := . -}} + {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +serviceEndpoints{ + {{ range $id, $endpoint := . 
-}} + {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000..fa06f7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 0000000..91a6f27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000..49674cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,121 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. 
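+//
+// A sketch of the comparison (levels built from the constants below):
+//
+//	l := aws.LogLevel(aws.LogDebugWithHTTPBody)
+//	l.AtLeast(aws.LogDebug)                        // true
+//	aws.LogLevel(aws.LogOff).AtLeast(aws.LogDebug) // false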
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests will
+	// be retried. This should be used when you want to log when service
+	// requests are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+
+	// LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
+	LogDebugWithDeprecated
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 0000000..2ba3c56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "use of closed network connection") || + strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000..9556332 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,346 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + BuildStream HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns a copy of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.BuildStream.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. 
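+//
+// It is the value passed to a HandlerList's AfterEachFn, for example
+// HandlerListLogItem and HandlerListStopOnError defined below.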
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic is to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as stopping on error like
+	// HandlerListStopOnError, or for logging like HandlerListLogItem.
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	if len(l.list) == 0 {
+		return n
+	}
+
+	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+	if cap(l.list) == 0 {
+		l.list = make([]NamedHandler, 0, 5)
+	}
+	l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+	l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+	if cap(l.list) == len(l.list) {
+		// Allocating a new list is required.
+		l.list = append([]NamedHandler{n}, l.list...)
+	} else {
+		// Enough room to prepend into list.
+		l.list = append(l.list, NamedHandler{})
+		copy(l.list[1:], l.list)
+		l.list[0] = n
+	}
+}
+
+// Remove removes a NamedHandler n.
+func (l *HandlerList) Remove(n NamedHandler) {
+	l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+	for i := 0; i < len(l.list); i++ {
+		m := l.list[i]
+		if m.Name == name {
+			// Shift the array down to avoid allocating a new array.
+			copy(l.list[i:], l.list[i+1:])
+			l.list[len(l.list)-1] = NamedHandler{}
+			l.list = l.list[:len(l.list)-1]
+
+			// Decrement i so the next length check is correct.
+			i--
+		}
+	}
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler, returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+	for i := 0; i < len(l.list); i++ {
+		if l.list[i].Name == n.Name {
+			l.list[i].Fn = n.Fn
+			swapped = true
+		}
+	}
+
+	return swapped
+}
+
+// Swap will replace all handlers matching the name passed in with the
+// replacement handler. True is returned if any handlers were swapped.
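+//
+// A sketch of replacing a handler by name (the handler names here are
+// assumptions for illustration):
+//
+//	l.Swap("core.SendHandler", request.NamedHandler{
+//		Name: "myapp.SendHandler",
+//		Fn:   func(r *request.Request) { /* custom send logic */ },
+//	})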
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +// +// Header keys added will be added as canonical format with title casing +// applied via http.Header.Set method. 
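+//
+// A sketch of attaching a custom header to a single operation (the header
+// name is an assumption for illustration):
+//
+//	svc.PutObjectWithContext(ctx, params,
+//		request.WithSetRequestHeaders(map[string]string{
+//			"X-Custom-Header": "value",
+//		}))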
+func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header.Set(k, v) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000..79f7960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000..9370fa5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000..636d9ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,722 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. 
+	ErrCodeRead = "ReadError"
+
+	// ErrCodeResponseTimeout is the connection timeout error that is received
+	// during body reads.
+	ErrCodeResponseTimeout = "ResponseTimeout"
+
+	// ErrCodeInvalidPresignExpire is returned when the expire time provided to
+	// presign is invalid.
+	ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+	// CanceledErrorCode is the error code that will be returned by an
+	// API request that was canceled. Requests given an aws.Context may
+	// return this error when canceled.
+	CanceledErrorCode = "RequestCanceled"
+
+	// ErrCodeRequestError is an error preventing the SDK from continuing to
+	// process the request.
+	ErrCodeRequestError = "RequestError"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+	Config     aws.Config
+	ClientInfo metadata.ClientInfo
+	Handlers   Handlers
+
+	Retryer
+	AttemptTime            time.Time
+	Time                   time.Time
+	Operation              *Operation
+	HTTPRequest            *http.Request
+	HTTPResponse           *http.Response
+	Body                   io.ReadSeeker
+	streamingBody          io.ReadCloser
+	BodyStart              int64 // offset from beginning of Body that the request body starts
+	Params                 interface{}
+	Error                  error
+	Data                   interface{}
+	RequestID              string
+	RetryCount             int
+	Retryable              *bool
+	RetryDelay             time.Duration
+	NotHoist               bool
+	SignedHeaderVals       http.Header
+	LastSignedAt           time.Time
+	DisableFollowRedirects bool
+
+	// Additional API error codes that should be retried. IsErrorRetryable
+	// will consider these codes in addition to its built in cases.
+	RetryErrorCodes []string
+
+	// Additional API error codes that should be retried with throttle backoff
+	// delay. IsErrorThrottle will consider these codes in addition to its
+	// built in cases.
+	ThrottleErrorCodes []string
+
+	// A value greater than 0 instructs the request to be signed as a Presigned
+	// URL. You should not set this field directly. Instead use Request's
+	// Presign or PresignRequest methods.
+	ExpireTime time.Duration
+
+	context aws.Context
+
+	built bool
+
+	// Need to persist an intermediate body between the input Body and HTTP
+	// request body because the HTTP Client's transport can maintain a reference
+	// to the HTTP request's body after the client has returned. This value is
+	// safe to use concurrently and wraps the input Body for each HTTP request.
+	safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+
+	BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API operation and
+// parameters.
+//
+// A Retryer should be provided to direct how the request is retried. If
+// Retryer is nil, a default no retry value will be used. You can use
+// NoOpRetryer in the Client package to disable retry behavior directly.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer to an object which the request's response
+// payload will be deserialized to.
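+//
+// New is normally called by the SDK's generated service clients rather than
+// application code directly; a sketch of direct use (the variable names are
+// placeholders):
+//
+//	req := request.New(cfg, info, handlers, retryer, op, params, &out)
+//	err := req.Send()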
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	if retryer == nil {
+		retryer = noOpRetryer{}
+	}
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
+
+	if len(operation.HTTPPath) != 0 {
+		opHTTPPath := operation.HTTPPath
+		var opQueryString string
+		if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
+			opQueryString = opHTTPPath[idx+1:]
+			opHTTPPath = opHTTPPath[:idx]
+		}
+
+		if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
+			opHTTPPath = opHTTPPath[1:]
+		}
+		httpReq.URL.Path += opHTTPPath
+		httpReq.URL.RawQuery = opQueryString
+	}
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       err,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//    var id2, versionID string
+//    svc.PutObjectWithContext(ctx, params,
+//        request.WithGetResponseHeader("x-amz-id-2", &id2),
+//        request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//    )
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//    var headers http.Header
+//    svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context aws.BackgroundContext will be returned.
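+//
+//	ctx := req.Context() // safe even if SetContext was never called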
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+	return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.Body = reader
+
+	if aws.IsReaderSeekable(reader) {
+		var err error
+		// Get the Body's current offset so retries will start from the same
+		// initial position.
+		r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
+		if err != nil {
+			r.Error = awserr.New(ErrCodeSerialization,
+				"failed to determine start of request body", err)
+			return
+		}
+	}
+	r.ResetBody()
+}
+
+// SetStreamingBody sets the reader to be used for the request that will stream
+// bytes to the server. Request's Body must not be set to any reader.
+func (r *Request) SetStreamingBody(reader io.ReadCloser) {
+	r.streamingBody = reader
+	r.SetReaderBody(aws.ReadSeekCloser(reader))
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
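+//
+// A sketch of presigning an S3 GET for 15 minutes (the S3 operation shown
+// is an assumption for illustration):
+//
+//	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+//	urlStr, err := req.Presign(15 * time.Minute)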
+func (r *Request) Presign(expire time.Duration) (string, error) {
+	r = r.copy()
+
+	// Presign requires all headers be hoisted. Without this there is no way
+	// to retrieve the signed headers that were not hoisted, which would make
+	// the presigned URL useless.
+	r.NotHoist = false
+
+	u, _, err := getPresignedURL(r, expire)
+	return u, err
+}
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+	r = r.copy()
+	return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool {
+	return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+	if expire <= 0 {
+		return "", nil, awserr.New(
+			ErrCodeInvalidPresignExpire,
+			"presigned URL requires an expire duration greater than 0",
+			nil,
+		)
+	}
+
+	r.ExpireTime = expire
+
+	if r.Operation.BeforePresignFn != nil {
+		if err := r.Operation.BeforePresignFn(r); err != nil {
+			return "", nil, err
+		}
+	}
+
+	if err := r.Sign(); err != nil {
+		return "", nil, err
+	}
+
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+const (
+	notRetrying = "not retrying"
+)
+
+func debugLogReqError(r *Request, stage, retryStr string, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+	if !r.built {
+		r.Handlers.Validate.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Validate Request", notRetrying, r.Error)
+			return r.Error
+		}
+		r.Handlers.Build.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Build Request", notRetrying, r.Error)
+			return r.Error
+		}
+		r.built = true
+	}
+
+	return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
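+//
+// A sketch of signing without sending, e.g. to inspect the signed request:
+//
+//	if err := req.Sign(); err == nil {
+//		fmt.Println(req.HTTPRequest.Header.Get("Authorization"))
+//	}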
+func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
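+		// (CompleteAttempt handlers, by contrast, run after every individual
+		// attempt; see sendRequest below.)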
+ r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. 
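+//
+// For example, stripPort("example.com:80") returns "example.com", and
+// stripPort("[::1]:80") returns "::1".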
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+	if port == "" {
+		return true
+	}
+
+	lowerCaseScheme := strings.ToLower(scheme)
+	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 0000000..5921b8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,40 @@
+//go:build !go1.8
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error)         { return 0, io.EOF }
+func (noBody) Close() error                     { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// a body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000..ea643c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,37 @@
+//go:build go1.8
+// +build go1.8
+
+package request
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// NoBody is an http.NoBody reader instructing the Go HTTP client to not
+// include a body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference.
When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8 http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = awserr.New(ErrCodeSerialization,
+			"failed to reset request body", err)
+		return
+	}
+
+	r.HTTPRequest.Body = body
+	r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000..d8c5053
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,15 @@
+//go:build go1.7
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates a shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 0000000..49a243e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,15 @@
+//go:build !go1.7
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Sets the http.Request's Cancel channel to the context's Done channel.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000..64784e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,266 @@
+package request
+
+import (
+	"reflect"
+	"sync/atomic"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operation methods to automatically perform pagination for you, such as the
+// "S3.ListObjectsPages" and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+//	for p.Next() {
+//		data := p.Page().(*s3.ListObjectsOutput)
+//		// process the page's data
+//		// ...
+//		// break out of loop to stop fetching additional pages
+//	}
+//
+//	return p.Err()
+//
+// See the service client API operation Pages methods for examples of how the
+// SDK will use the Pagination type.
+type Pagination struct {
+	// Function to return a Request value for each pagination request.
+	// Any configuration or handlers that need to be applied to the request
+	// prior to getting the next page should be done here before the request
+	// is returned.
+	//
+	// NewRequest should always be built from the same API operation. It is
+	// undefined if different API operations are returned on subsequent calls.
+	NewRequest func() (*Request, error)
+	// EndPageOnSameToken, when enabled, will allow the paginator to stop on
+	// tokens that are the same as the previous page's tokens.
+	EndPageOnSameToken bool
+
+	started    bool
+	prevTokens []interface{}
+	nextTokens []interface{}
+
+	err     error
+	curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+	if !p.started {
+		return true
+	}
+
+	hasNextPage := len(p.nextTokens) != 0
+	if p.EndPageOnSameToken {
+		return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+	}
+	return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+	return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+	return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Next returns false.
+func (p *Pagination) Next() bool {
+	if !p.HasNextPage() {
+		return false
+	}
+
+	req, err := p.NewRequest()
+	if err != nil {
+		p.err = err
+		return false
+	}
+
+	if p.started {
+		for i, intok := range req.Operation.InputTokens {
+			awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+		}
+	}
+	p.started = true
+
+	err = req.Send()
+	if err != nil {
+		p.err = err
+		return false
+	}
+
+	p.prevTokens = p.nextTokens
+	p.nextTokens = req.nextPageTokens()
+	p.curPage = req.Data
+
+	return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. The
+// Paginator is only used to store the token metadata the SDK should use for
+// performing pagination.
+type Paginator struct {
+	InputTokens     []string
+	OutputTokens    []string
+	LimitToken      string
+	TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
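+//
+// For example, a service model might wire up a Paginator such as the
+// following (the token names here are illustrative, not taken from a real
+// service model):
+//
+//	&request.Paginator{
+//		InputTokens:     []string{"Marker"},
+//		OutputTokens:    []string{"NextMarker"},
+//		LimitToken:      "MaxItems",
+//		TruncationToken: "IsTruncated",
+//	}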
+func (r *Request) nextPageTokens() []interface{} {
+	if r.Operation.Paginator == nil {
+		return nil
+	}
+	if r.Operation.TruncationToken != "" {
+		tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+		if len(tr) == 0 {
+			return nil
+		}
+
+		switch v := tr[0].(type) {
+		case *bool:
+			if !aws.BoolValue(v) {
+				return nil
+			}
+		case bool:
+			if !v {
+				return nil
+			}
+		}
+	}
+
+	tokens := []interface{}{}
+	tokenAdded := false
+	for _, outToken := range r.Operation.OutputTokens {
+		vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+		if len(vs) == 0 {
+			tokens = append(tokens, nil)
+			continue
+		}
+		v := vs[0]
+
+		switch tv := v.(type) {
+		case *string:
+			if len(aws.StringValue(tv)) == 0 {
+				tokens = append(tokens, nil)
+				continue
+			}
+		case string:
+			if len(tv) == 0 {
+				tokens = append(tokens, nil)
+				continue
+			}
+		}
+
+		tokenAdded = true
+		tokens = append(tokens, v)
+	}
+	if !tokenAdded {
+		return nil
+	}
+
+	return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+	if logger == nil {
+		return
+	}
+	if atomic.CompareAndSwapInt32(flag, 0, 1) {
+		logger.Log(msg)
+	}
+}
+
+var (
+	logDeprecatedHasNextPage int32
+	logDeprecatedNextPage    int32
+	logDeprecatedEachPage    int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+		"Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) NextPage() *Request {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+		"Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//	func(page *T, lastPage bool) bool {
+//		return true // return false to stop iterating
+//	}
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+		"Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations")
+
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000..3f0001f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,309 @@
+package request
+
+import (
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer provides the interface to drive the SDK's request retry behavior.
+// The Retryer implementation is responsible for implementing exponential
+// backoff, and for determining if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
+// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
+// determine if the request is retried.
+type Retryer interface {
+	// RetryRules return the retry delay that should be used by the SDK before
+	// making another request attempt for the failed request.
+	RetryRules(*Request) time.Duration
+
+	// ShouldRetry returns whether the failed request is retryable.
+	//
+	// Implementations may consider request attempt count when determining if a
+	// request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts made for a request.
+	ShouldRetry(*Request) bool
+
+	// MaxRetries is the number of times a request may be retried before
+	// failing.
+	MaxRetries() int
+}
+
+// WithRetryer sets a Retryer value on the given Config, returning the Config
+// value for chaining. The value must not be nil.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	if retryer == nil {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
+		}
+		retryer = noOpRetryer{}
+	}
+	cfg.Retryer = retryer
+	return cfg
+}
+
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the SDK will make for an
+// individual API request; for noOpRetryer MaxRetries will always be zero.
+func (d noOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for noOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since noOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+	return 0
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
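+//
+// Additional codes can be treated as retryable for a specific request via the
+// Request.RetryErrorCodes field (illustrative):
+//
+//	req.RetryErrorCodes = append(req.RetryErrorCodes, "MyTransientError")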
+var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. 
+		return true
+
+	default:
+		switch err.Error() {
+		case "net/http: request canceled",
+			"net/http: request canceled while waiting for connection":
+			// known Go 1.5 error cases when an http request is canceled
+			return false
+		}
+		// here we don't know the error; so we allow a retry.
+		return true
+	}
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeThrottle(aerr.Code())
+	}
+	return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry
+// error. Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeExpiredCreds(aerr.Code())
+	}
+	return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+	if isErrCode(r.Error, r.RetryErrorCodes) {
+		return true
+	}
+
+	// HTTP response status code 501 should not be retried.
+	// 501 represents Not Implemented, which means the request method is not
+	// supported by the server and cannot be handled.
+	if r.HTTPResponse != nil {
+		// HTTP response status code 500 represents an internal server error
+		// and should be retried without any throttle.
+		if r.HTTPResponse.StatusCode == 500 {
+			return true
+		}
+	}
+	return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its
+// code. Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+	if isErrCode(r.Error, r.ThrottleErrorCodes) {
+		return true
+	}
+
+	if r.HTTPResponse != nil {
+		switch r.HTTPResponse.StatusCode {
+		case
+			429, // error caused due to too many requests
+			502, // Bad Gateway error should be throttled
+			503, // caused when service is unavailable
+			504: // error occurred due to gateway timeout
+			return true
+		}
+	}
+
+	return IsErrorThrottle(r.Error)
+}
+
+func isErrCode(err error, codes []string) bool {
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		for _, code := range codes {
+			if code == aerr.Code() {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+	return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 0000000..09a44eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+	"io"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+	ErrCodeResponseTimeout,
+	"read on body has reached the timeout limit",
+	nil,
+)
+
+type readResult struct {
+	n   int
+	err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. 
We will
+// select on the timer's channel or the read's channel. Whichever completes
+// first will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, timeoutErr
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+const (
+	// HandlerResponseTimeout is what we use to signify the name of the
+	// response timeout handler.
+	HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace a top level error
+// with an ErrCodeResponseTimeout error, if that is what the wrapped error is.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a
+// timeout read closer. This will allow for per read timeouts. If a timeout
+// occurs, the ErrCodeResponseTimeout error will be returned.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000..8630683
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameters errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+	// ParamMaxLenErrCode is the error code for value being too long.
+	ParamMaxLenErrCode = "ParamMaxLenError"
+
+	// ParamFormatErrCode is the error code for a field with invalid
+	// format or characters.
+	ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+	Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+	// Context is the base context of the invalid parameter group.
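+	// For example, the name of the API operation's input type, such as
+	// "ListObjectsInput" (illustrative).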
+	Context string
+	errs    []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validation errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors.
+func (e ErrInvalidParams) Len() int {
+	return len(e.errs)
+}
+
+// Code returns the code of the error.
+func (e ErrInvalidParams) Code() string {
+	return InvalidParameterErrCode
+}
+
+// Message returns the message of the error.
+func (e ErrInvalidParams) Message() string {
+	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value.
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters.
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
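+//
+// For example (illustrative):
+//
+//	e := &errInvalidParam{field: "Name"}
+//	e.SetContext("ListInput")
+//	e.AddNestedContext("Filters")
+//	e.Field() // "ListInput.Filters.Name"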
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   "missing required field",
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+	return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 0000000..4601f88
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
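+//
+// Callers can detect this condition (illustrative):
+//
+//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.WaiterResourceNotReadyErrorCode {
+//		// the resource never reached the desired state
+//	}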
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+	return func(w *Waiter) {
+		w.MaxAttempts = max
+	}
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+	return func(attempt int) time.Duration {
+		return delay
+	}
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+	return func(w *Waiter) {
+		w.Delay = delayer
+	}
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+	return func(w *Waiter) {
+		w.Logger = logger
+	}
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to the waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+	return func(w *Waiter) {
+		w.RequestOptions = append(w.RequestOptions, opts...)
+	}
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+	Name      string
+	Acceptors []WaiterAcceptor
+	Logger    aws.Logger
+
+	MaxAttempts int
+	Delay       WaiterDelay
+
+	RequestOptions   []Option
+	NewRequest       func([]Option) (*Request, error)
+	SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+	for _, fn := range opts {
+		fn(w)
+	}
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+	switch s {
+	case SuccessWaiterState:
+		return "success"
+	case FailureWaiterState:
+		return "failure"
+	case RetryWaiterState:
+		return "retry"
+	default:
+		return "unknown waiter state"
+	}
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+	SuccessWaiterState WaiterState = iota // waiter successful
+	FailureWaiterState                    // waiter failed
+	RetryWaiterState                      // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting the API response to identify
+// target resource states.
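+//
+// For example, an acceptor that succeeds when every matched path equals a
+// value might look like (illustrative values):
+//
+//	request.WaiterAcceptor{
+//		State:    request.SuccessWaiterState,
+//		Matcher:  request.PathAllWaiterMatch,
+//		Argument: "Reservations[].Instances[].State.Name",
+//		Expected: "running",
+//	}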
+const (
+	PathAllWaiterMatch  WaiterMatchMode = iota // match on all paths
+	PathWaiterMatch                            // match on specific path
+	PathAnyWaiterMatch                         // match on any path
+	PathListWaiterMatch                        // match on list of paths
+	StatusWaiterMatch                          // match on status code
+	ErrorWaiterMatch                           // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+	switch m {
+	case PathAllWaiterMatch:
+		return "pathAll"
+	case PathWaiterMatch:
+		return "path"
+	case PathAnyWaiterMatch:
+		return "pathAny"
+	case PathListWaiterMatch:
+		return "pathList"
+	case StatusWaiterMatch:
+		return "status"
+	case ErrorWaiterMatch:
+		return "error"
+	default:
+		return "unknown waiter match mode"
+	}
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts expire.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expire.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+	for attempt := 1; ; attempt++ {
+		req, err := w.NewRequest(w.RequestOptions)
+		if err != nil {
+			waiterLogf(w.Logger, "unable to create request %v", err)
+			return err
+		}
+		req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+		err = req.Send()
+
+		// See if any of the acceptors match the request's response, or error
+		for _, a := range w.Acceptors {
+			if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+				return matchErr
+			}
+		}
+
+		// The Waiter should only check the resource state MaxAttempts times.
+		// This is here instead of in the for loop above to prevent delaying
+		// unnecessarily when the waiter will not retry.
+		if attempt == w.MaxAttempts {
+			break
+		}
+
+		// Delay to wait before inspecting the resource again
+		delay := w.Delay(attempt)
+		if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+			// Support SleepDelay for backwards compatibility and testing
+			sleepFn(delay)
+		} else {
+			sleepCtxFn := w.SleepWithContext
+			if sleepCtxFn == nil {
+				sleepCtxFn = aws.SleepWithContext
+			}
+
+			if err := sleepCtxFn(ctx, delay); err != nil {
+				return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+			}
+		}
+	}
+
+	return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+	State    WaiterState
+	Matcher  WaiterMatchMode
+	Argument string
+	Expected interface{}
+}
+
+// match returns whether the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, and an error is
+// returned if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+	result := false
+	var vals []interface{}
+
+	switch a.Matcher {
+	case PathAllWaiterMatch, PathWaiterMatch:
+		// Require all matches to be equal for result to match
+		vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+		if len(vals) == 0 {
+			break
+		}
+		result = true
+		for _, val := range vals {
+			if !awsutil.DeepEqual(val, a.Expected) {
+				result = false
+				break
+			}
+		}
+	case PathAnyWaiterMatch:
+		// Only a single match needs to equal for the result to match
+		vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+		for _, val := range vals {
+			if awsutil.DeepEqual(val, a.Expected) {
+				result = true
+				break
+			}
+		}
+	case PathListWaiterMatch:
+		// ignored matcher
+	case StatusWaiterMatch:
+		s := a.Expected.(int)
+		result = s == req.HTTPResponse.StatusCode
+	case ErrorWaiterMatch:
+		if aerr, ok := err.(awserr.Error); ok {
+			result = aerr.Code() == a.Expected.(string)
+		}
+	default:
+		waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+			name, a.Matcher)
+	}
+
+	if !result {
+		// If there was no matching result found there is nothing more to do
+		// for this response, retry the request.
+		return false, nil
+	}
+
+	switch a.State {
+	case SuccessWaiterState:
+		// waiter completed
+		return true, nil
+	case FailureWaiterState:
+		// Waiter failure state triggered
+		return true, awserr.New(WaiterResourceNotReadyErrorCode,
+			"failed waiting for successful resource state", err)
+	case RetryWaiterState:
+		// clear the error and retry the operation
+		return false, nil
+	default:
+		waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+			name, a.State)
+		return false, nil
+	}
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+	if logger != nil {
+		logger.Log(fmt.Sprintf(msg, args...))
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644
index 0000000..1d3f4c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -0,0 +1,303 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// CredentialsProviderOptions specifies additional options for configuring
+// credentials providers.
+type CredentialsProviderOptions struct {
+	// WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider,
+	// such as setting its ExpiryWindow.
+	WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider)
+}
+
+func resolveCredentials(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	switch {
+	case len(sessOpts.Profile) != 0:
+		// User explicitly provided a Profile in the session's configuration
+		// so load that profile from shared config first.
+		// Github(aws/aws-sdk-go#2727)
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+	case envCfg.Creds.HasKeys():
+		// Environment credentials
+		return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+	case len(envCfg.WebIdentityTokenFilePath) != 0:
+		// Web identity token from environment, RoleARN required to also be
+		// set.
+		return assumeWebIdentity(cfg, handlers,
+			envCfg.WebIdentityTokenFilePath,
+			envCfg.RoleARN,
+			envCfg.RoleSessionName,
+			sessOpts.CredentialsProviderOptions,
+		)
+
+	default:
+		// Fallback to the "default" credential resolution chain.
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+	}
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
+// 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+	credOptions *CredentialsProviderOptions,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	svc := sts.New(&Session{
+		Config:   cfg,
+		Handlers: handlers.Copy(),
+	})
+
+	var optFns []func(*stscreds.WebIdentityRoleProvider)
+	if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil {
+		optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions)
+	}
+
+	p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...)
+	return credentials.NewCredentials(p), nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {

+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+			sessOpts.CredentialsProviderOptions,
+		)
+
+	case sharedCfg.hasSSOConfiguration():
+		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+	default:
+		// Fallback to the default credentials provider, and include mock
+		// errors from the credential chain so the user can identify why
+		// credentials failed to be retrieved.
+		creds = credentials.NewCredentials(&credentials.ChainProvider{
+			VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+			Providers: []credentials.Provider{
+				&credProviderError{
+					Err: awserr.New("EnvAccessKeyNotFound",
+						"failed to find credentials in the environment.", nil),
+				},
+				&credProviderError{
+					Err: awserr.New("SharedCredsLoad",
+						fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+				},
+				defaults.RemoteCredProvider(*cfg, handlers),
+			},
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if len(sharedCfg.RoleARN) > 0 {
+		cfgCp := *cfg
+		cfgCp.Credentials = creds
+		return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+	}
+
+	return creds, nil
+}
+
+func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
+	if err := sharedCfg.validateSSOConfiguration(); err != nil {
+		return nil, err
+	}
+
+	cfgCopy := cfg.Copy()
+	cfgCopy.Region = &sharedCfg.SSORegion
+
+	return ssocreds.NewCredentials(
+		&Session{
+			Config:   cfgCopy,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.SSOAccountID,
+		sharedCfg.SSORoleName,
+		sharedCfg.SSOStartURL,
+	), nil
+}
+
+// valid credential source values
+const (
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	case credSourceEnvironment:
+		creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+	case credSourceECSContainer:
+		if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+			return nil, ErrSharedConfigECSContainerEnvVarEmpty
+		}
+
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	default:
+		return nil, ErrSharedConfigInvalidCredSource
+	}
+
+	return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+	handlers request.Handlers,
+	sharedCfg sharedConfig,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
+		// AssumeRole Token provider is required if doing Assume Role
+		// with MFA.
+		return nil, AssumeRoleTokenProviderNotSetError{}
+	}
+
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.RoleSessionName
+
+			if sessOpts.AssumeRoleDuration == 0 &&
+				sharedCfg.AssumeRoleDuration != nil &&
+				*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+				opt.Duration = *sharedCfg.AssumeRoleDuration
+			} else if sessOpts.AssumeRoleDuration != 0 {
+				opt.Duration = sessOpts.AssumeRoleDuration
+			}
+
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session where the MFAToken option is not set, but the shared config is
+// configured to assume a role with an MFA token.
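+//
+// To resolve this error, set the token provider when creating the session
+// (illustrative; this mirrors the example in the package documentation):
+//
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+//	}))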
+type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go new file mode 100644 index 0000000..4390ad5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go @@ -0,0 +1,28 @@ +//go:build go1.13 +// +build go1.13 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go new file mode 100644 index 0000000..668565b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go @@ -0,0 +1,27 @@ +//go:build !go1.13 && go1.7 +// +build !go1.13,go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go new file mode 100644 index 0000000..e101aa6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go @@ -0,0 +1,23 @@ +//go:build !go1.6 && go1.5 +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
new file mode 100644
index 0000000..b5fcbe0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
@@ -0,0 +1,24 @@
+//go:build !go1.7 && go1.6
+// +build !go1.7,go1.6
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 0000000..ff3cc01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,367 @@
+/*
+Package session provides configuration for the SDK's service clients. Sessions
+can be shared across service clients that share the same base configuration.
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. Sessions should be cached when possible, because creating a new
+Session will load all configuration values from the environment and config
+files each time the Session is created. Sharing the Session value across all of
+your service clients will ensure the configuration is loaded the fewest number
+of times possible.
+
+Session options from Shared Config
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. Using NewSessionWithOptions with
+SharedConfigState set to SharedConfigEnable will create the session as if the
+AWS_SDK_LOAD_CONFIG environment variable was set.
+
+Credential and config loading order
+
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:
+
+	* Environment Variables
+	* Shared Credentials file
+	* Shared Configuration file (if SharedConfig is enabled)
+	* EC2 Instance Metadata (credentials only)
+
+The environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior and use
+shared config credentials instead, specify session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "myProfile",
+	})
+
+Creating Sessions
+
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on the environment
+variables used by Session.
+
+	// Create Session
+	sess, err := session.NewSession()
+
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+		Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded. Such as specifying a shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Options
+	})
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Specify profile to load for the session's config
+		Profile: "profile_name",
+
+		// Provide SDK Config options, such as Region.
+		Config: aws.Config{
+			Region: aws.String("us-west-2"),
+		},
+
+		// Force enable Shared Config support
+		SharedConfigState: session.SharedConfigEnable,
+	})
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Params: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source, such as the "source_profile",
+"credential_process", or "credential_source" fields.
If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Custom Shared Config and Credential Files + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Custom CA Bundle + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. 
+
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load, an error will be returned.
+
+The HTTP Client's Transport concrete implementation must be an http.Transport
+or creating the session will fail.
+
+	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		ClientTLSCert: myCertFile,
+		ClientTLSKey:  myKeyFile,
+	})
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		EC2IMDSEndpoint: "http://[::1]",
+	})
+
+FIPS and DualStack Endpoints
+
+The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack.
+
+You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a FIPS endpoint, set the AWS_USE_FIPS_ENDPOINT environment variable to true or false to enable
+or disable FIPS endpoint resolution.
+
+	AWS_USE_FIPS_ENDPOINT=true
+
+To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable
+or disable FIPS endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_fips_endpoint=true
+
+To configure a FIPS endpoint programmatically:
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+
+You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a DualStack endpoint, set the AWS_USE_DUALSTACK_ENDPOINT environment variable to true or false to
+enable or disable DualStack endpoint resolution.
+
+	AWS_USE_DUALSTACK_ENDPOINT=true
+
+To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable
+or disable DualStack endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_dualstack_endpoint=true
+
+To configure a DualStack endpoint programmatically:
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 0000000..d6fa247
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,471 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read its
+// configuration from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. A Session Token may optionally also be provided, but is
+	// not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to. If it
+	// is not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided, "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not an http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
+	csmEnabled  string
+	CSMEnabled  *bool
+	CSMPort     string
+	CSMHost     string
+	CSMClientID string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery *bool
+	enableEndpointDiscovery string
+
+	// Specifies the path to the WebIdentity token file the SDK should use to
+	// assume a role with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+ // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint endpoints.FIPSEndpointState +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } + ec2IMDSEndpointEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT", + } + ec2IMDSEndpointModeEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", + } + useCABundleKey = []string{ + "AWS_CA_BUNDLE", + } + useClientTLSCert = []string{ + "AWS_SDK_GO_CLIENT_TLS_CERT", + } + useClientTLSKey = []string{ + "AWS_SDK_GO_CLIENT_TLS_KEY", + } + awsUseDualStackEndpoint = []string{ + "AWS_USE_DUALSTACK_ENDPOINT", + } + awsUseFIPSEndpoint = []string{ + "AWS_USE_FIPS_ENDPOINT", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. 
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() (envConfig, error) {
+	return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
+	cfg := envConfig{}
+
+	cfg.EnableSharedConfig = enableSharedConfig
+
+	// Static environment credentials
+	var creds credentials.Value
+	setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+	setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+	setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+	if creds.HasKeys() {
+		// Require logical grouping of credentials
+		creds.ProviderName = EnvProviderName
+		cfg.Creds = creds
+	}
+
+	// Role Metadata
+	setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
+	setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
+
+	// Web identity environment variables
+	setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
+
+	// CSM environment variables
+	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+	setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
+	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+
+	if len(cfg.csmEnabled) != 0 {
+		v, _ := strconv.ParseBool(cfg.csmEnabled)
+		cfg.CSMEnabled = &v
+	}
+
+	regionKeys := regionEnvKeys
+	profileKeys := profileEnvKeys
+	if !cfg.EnableSharedConfig {
+		regionKeys = regionKeys[:1]
+		profileKeys = profileKeys[:1]
+	}
+
+	setFromEnvVal(&cfg.Region, regionKeys)
+	setFromEnvVal(&cfg.Profile, profileKeys)
+
+	// Endpoint discovery is configured from its raw environment value.
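+	// Any non-empty value other than "false" enables endpoint discovery;
+	// an empty value leaves EnableEndpointDiscovery nil so later
+	// configuration sources can decide.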
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) + setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) + setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { + return envConfig{}, err + } + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { + return cfg, err + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} + +func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + return nil + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case 
strings.EqualFold(value, "true"):
+			*dst = endpoints.FIPSEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = endpoints.FIPSEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false",
+				k, value)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 0000000..4293dbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,997 @@
+package session
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/csm"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+	// ErrCodeSharedConfig represents an error that occurs in the shared
+	// configuration logic
+	ErrCodeSharedConfig = "SharedConfigErr"
+
+	// ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert error code for unable to load client TLS
+	// certificate or key
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and EcsContainer was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+
+	options Options
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the
+// New method could now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session.
Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg, envErr := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(envCfg, cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. 
It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+//
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set with this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment,
+	// config will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It will override the environment variables AWS_SHARED_CREDENTIALS_FILE
+	// and AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// HTTP Client's Transport concrete implementation must be an http.Transport
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	// ca_bundle = $HOME/ca_bundle
+	CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be an http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be an http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
+
+	// Allows specifying a custom endpoint to be used by the EC2 IMDS client
+	// when making requests to the EC2 IMDS API. The endpoint value should
+	// include the URI scheme. If the scheme is not present it will be defaulted to http.
+	//
+	// If unset, the EC2 IMDS client will use its default endpoint.
+	//
+	// Can also be specified via the environment variable,
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+	//
+	// If using a URL with an IPv6 address literal, the IPv6 address
+	// component must be enclosed in square brackets.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies options for creating credential providers.
+	// These are only used if the aws.Config does not already
+	// include credentials.
+	CredentialsProviderOptions *CredentialsProviderOptions
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials).
Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID && len(endpoint) > 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } else if service == ec2MetadataServiceID { + opts = append(opts, func(o *endpoints.Options) { + o.EC2MetadataEndpointMode = mode + }) + } + return resolver.EndpointFor(service, region, opts...) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. 
+		cfg.EndpointResolver = endpoints.DefaultResolver()
+	}
+
+	if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) {
+		cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode)
+	}
+
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options: Options{
+			EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+		},
+	}
+
+	initHandlers(s)
+	return s
+}
+
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+	if logger != nil {
+		logger.Log("Enabling CSM")
+	}
+
+	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
+	if err != nil {
+		return err
+	}
+	r.InjectHandlers(handlers)
+
+	return nil
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+	cfg := defaults.Config()
+
+	handlers := opts.Handlers
+	if handlers.IsEmpty() {
+		handlers = defaults.Handlers()
+	}
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were set.
+	userCfg := &aws.Config{}
+	userCfg.MergeIn(cfgs...)
+	cfg.MergeIn(userCfg)
+
+	// Ordered config files will be loaded in with later files overwriting
+	// previous config file values.
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+	if err != nil {
+		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+			// Special case where the user has not explicitly specified an AWS_PROFILE,
+			// or session.Options.profile, shared config is not enabled, and the
+			// environment has credentials, allow the shared config file to fail to
+			// load since the user has already provided credentials, and nothing else
+			// is required to be read from file. Github(aws/aws-sdk-go#2455)
+		} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return nil, err
+		}
+	}
+
+	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+		return nil, err
+	}
+
+	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+		return nil, err
+	}
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options:  opts,
+	}
+
+	initHandlers(s)
+
+	if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+type csmConfig struct {
+	Enabled  bool
+	Host     string
+	Port     string
+	ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+	if envCfg.CSMEnabled != nil {
+		if *envCfg.CSMEnabled {
+			return csmConfig{
+				Enabled:  true,
+				ClientID: envCfg.CSMClientID,
+				Host:     envCfg.CSMHost,
+				Port:     envCfg.CSMPort,
+			}, nil
+		}
+		return csmConfig{}, nil
+	}
+
+	sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return csmConfig{}, err
+		}
+	}
+	if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
+		return csmConfig{
+			Enabled:  true,
+			ClientID: sharedCfg.CSMClientID,
+			Host:     sharedCfg.CSMHost,
+			Port:     sharedCfg.CSMPort,
+		}, nil
+	}
+
+	return csmConfig{}, nil
+}
+
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// The CA bundle can be specified in both the environment variable and the
+	// shared config file.
+	var caBundleFilename = envCfg.CustomCABundle
+	if len(caBundleFilename) == 0 {
+		caBundleFilename = sharedCfg.CustomCABundle
+	}
+
+	// Only use the environment value if the session option is not provided.
+	customTLSOptions := map[string]struct {
+		filename string
+		field    *io.Reader
+		errCode  string
+	}{
+		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+	}
+	for name, v := range customTLSOptions {
+		if len(v.filename) != 0 && *v.field == nil {
+			f, err := os.Open(v.filename)
+			if err != nil {
+				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+			}
+			defer f.Close()
+			*v.field = f
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+			return err
+		}
+	}
+
+	// Setup HTTP client TLS certificate and key for client TLS authentication.
+	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+			return err
+		}
+	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
+
+	} else {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+	}
+
+	return nil
+}
+
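+// getHTTPTransport returns the client's Transport when it is an
+// *http.Transport, a fresh custom transport when no Transport is set, and an
+// error for any other concrete Transport type.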
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
+	var t *http.Transport
+	switch v := client.Transport.(type) {
+	case *http.Transport:
+		t = v
+	default:
+		if client.Transport != nil {
+			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
+		}
+	}
+	if t == nil {
+		// A nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify, nor copy the `DefaultTransport`, specifying
+		// equivalent values is the next closest behavior.
+		t = getCustomTransport()
+	}
+
+	return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadCustomCABundle,
+			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
+	}
+
+	p, err := loadCertPool(bundle)
+	if err != nil {
+		return err
+	}
+	if t.TLSClientConfig == nil {
+		t.TLSClientConfig = &tls.Config{}
+	}
+	t.TLSClientConfig.RootCAs = p
+
+	client.Transport = t
+
+	return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to read custom CA bundle PEM file", err)
+	}
+
+	p := x509.NewCertPool()
+	if !p.AppendCertsFromPEM(b) {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to load custom CA bundle PEM file", err)
+	}
+
+	return p, nil
+}
+
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to get usable HTTP transport from client", err)
+	}
+
+	cert, err := ioutil.ReadAll(certFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+	}
+
+	key, err := ioutil.ReadAll(keyFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+	}
+
+	clientCert, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to load x509 key pair from client cert", err)
+	}
+
+	tlsCfg := t.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+	t.TLSClientConfig = tlsCfg
+	client.Transport = t
+
+	return nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) error {
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	if cfg.EnableEndpointDiscovery == nil {
+		if envCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+		} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+		}
+	}
+
+	// Regional Endpoint flag for STS endpoint resolving
+	mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
+		userCfg.STSRegionalEndpoint,
+		envCfg.STSRegionalEndpoint,
+		sharedCfg.STSRegionalEndpoint,
+
endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + var ec2IMDSEndpoint string + for _, v := range []string{ + sessOpts.EC2IMDSEndpoint, + envCfg.EC2IMDSEndpoint, + sharedCfg.EC2IMDSEndpoint, + } { + if len(v) != 0 { + ec2IMDSEndpoint = v + break + } + } + + var endpointMode endpoints.EC2IMDSEndpointModeState + for _, v := range []endpoints.EC2IMDSEndpointModeState{ + sessOpts.EC2IMDSEndpointMode, + envCfg.EC2IMDSEndpointMode, + sharedCfg.EC2IMDSEndpointMode, + } { + if v != endpoints.EC2IMDSEndpointModeStateUnset { + endpointMode = v + break + } + } + + if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) + } + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { + if v != endpoints.DualStackEndpointStateUnset { + cfg.UseDualStackEndpoint = v + break + } + } + + for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { + if v != endpoints.FIPSEndpointStateUnset { + cfg.UseFIPSEndpoint = v + break + } + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. 
Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + resolvedRegion := normalizeRegion(s.Config) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
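+
+	// Unlike ClientConfig, the session's EndpointResolver is not consulted;
+	// only an explicitly set aws.Config.Endpoint value is applied below.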
+ + resolvedRegion := normalizeRegion(s.Config) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} + +// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided +// config to have the equivalent options for resolution and returns the resolved region name. +func normalizeRegion(cfg *aws.Config) (resolved string) { + const fipsInfix = "-fips-" + const fipsPrefix = "-fips" + const fipsSuffix = "fips-" + + region := aws.StringValue(cfg.Region) + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + resolved = strings.Replace(strings.Replace(strings.Replace( + region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1) + cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } + + return resolved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000..424c82b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,729 @@ +package session + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoAccountIDKey = "sso_account_id" + ssoRegionKey = "sso_region" + ssoRoleNameKey = "sso_role_name" + ssoStartURL = "sso_start_url" + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // custom CA Bundle filename + customCABundleKey = `ca_bundle` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // 
optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" + + // EC2 IMDS Endpoint Mode + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + // EC2 IMDS Endpoint + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // Use FIPS Endpoint Resolution + useFIPSEndpointKey = "use_fips_endpoint" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + Profile string + + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + SSOAccountID string + SSORegion string + SSORoleName string + SSOStartURL string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // CustomCABundle is the file path to a PEM file the SDK will read and + // use to configure the HTTP transport with additional CA certs that are + // not present in the platforms default CA store. + // + // This value will be ignored if the file does not exist. + // + // ca_bundle + CustomCABundle string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. 
+	//
+	// s3_use_arn_region=true
+	S3UseARNRegion bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// use_dualstack_endpoint=true
+	UseDualStackEndpoint endpoints.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// use_fips_endpoint=true
+	UseFIPSEndpoint endpoints.FIPSEndpointState
+}
+
+type sharedConfigFile struct {
+	Filename string
+	IniData  ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed in determines
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	profiles := map[string]struct{}{}
+	if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+		return sharedConfig{}, err
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+			// Skip files which can't be opened and read for whatever reason
+			continue
+		} else if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: sections,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+	cfg.Profile = profile
+
+	// Trim files from the list that don't exist.
+	var skippedFiles int
+	var profileNotFoundErr error
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles not defined in individual files.
+				profileNotFoundErr = err
+				skippedFiles++
+				continue
+			}
+			return err
+		}
+	}
+	if skippedFiles == len(files) {
+		// If all files were skipped because the profile is not found, return
+		// the original profile not found error.
+		return profileNotFoundErr
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// If this is the second instance of the profile, the assume role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
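+		//
+		// Illustrative shared config (hypothetical profile name and values)
+		// showing the self-referential case this branch handles:
+		//
+		//	[profile self]
+		//	role_arn = arn:aws:iam::123456789012:role/example
+		//	source_profile = self
+		//	credential_process = /usr/local/bin/creds
+		//
+		// On the second visit of "self" the role_arn/source_profile options
+		// are dropped and only credential_process remains in effect.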
+		cfg.clearAssumeRoleOptions()
+	} else {
+		// The first time a profile is seen it must either be assume role
+		// credentials, or SSO. Assert that if the credential type requires a
+		// role ARN, the ARN is also set, or validate that the SSO
+		// configuration is complete.
+		if err := cfg.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+	profiles[profile] = struct{}{}
+
+	if err := cfg.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(cfg.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		cfg.clearCredentialOptions()
+
+		srcCfg := &sharedConfig{}
+		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+		if err != nil {
+			// A SourceProfile that doesn't exist is an error in the
+			// configuration.
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN:       cfg.RoleARN,
+					SourceProfile: cfg.SourceProfileName,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN:       cfg.RoleARN,
+				SourceProfile: cfg.SourceProfileName,
+			}
+		}
+
+		cfg.SourceProfile = srcCfg
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// It only loads complete, logically grouped values, and will not set fields
+// in cfg for incomplete grouped values in the config, such as credentials.
+// For example, if a config file only includes aws_access_key_id but no
+// aws_secret_access_key, the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
+		// Fall back to the alternate profile section name, "profile <name>".
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+		}
+	}
+
+	if exOpts {
+		// Assume Role Parameters
+		updateString(&cfg.RoleARN, section, roleArnKey)
+		updateString(&cfg.ExternalID, section, externalIDKey)
+		updateString(&cfg.MFASerial, section, mfaSerialKey)
+		updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+		updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+		updateString(&cfg.CredentialSource, section, credentialSourceKey)
+		updateString(&cfg.Region, section, regionKey)
+		updateString(&cfg.CustomCABundle, section, customCABundleKey)
+
+		if section.Has(roleDurationSecondsKey) {
+			d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+			cfg.AssumeRoleDuration = &d
+		}
+
+		if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
+			sre, err := endpoints.GetSTSRegionalEndpoint(v)
+			if err != nil {
+				return fmt.Errorf("failed to load %s from shared config, %s, %v",
+					stsRegionalEndpointSharedKey, file.Filename, err)
+			}
+			cfg.STSRegionalEndpoint = sre
+		}
+
+		if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+			sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+			if err != nil {
+				return fmt.Errorf("failed to load %s from shared config, %s, %v",
+					s3UsEast1RegionalSharedKey, file.Filename, err)
+			}
+			cfg.S3UsEast1RegionalEndpoint = sre
+		}
+
+		// AWS Single Sign-On (AWS SSO)
+		updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
+		updateString(&cfg.SSORegion,
section, ssoRegionKey) + updateString(&cfg.SSORoleName, section, ssoRoleNameKey) + updateString(&cfg.SSOStartURL, section, ssoStartURL) + + if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + ec2MetadataServiceEndpointModeKey, file.Filename, err) + } + updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + + updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) + + updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
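+	//
+	// Illustrative profile (hypothetical) that would trip this check by
+	// defining two credential sources at once:
+	//
+	//	[profile collision]
+	//	source_profile = base
+	//	credential_process = /usr/local/bin/creds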
+	if !oneOrNone(
+		len(cfg.SourceProfileName) != 0,
+		len(cfg.CredentialSource) != 0,
+		len(cfg.CredentialProcess) != 0,
+		len(cfg.WebIdentityTokenFile) != 0,
+	) {
+		return ErrSharedConfigSourceCollision
+	}
+
+	return nil
+}
+
+func (cfg *sharedConfig) validateSSOConfiguration() error {
+	if !cfg.hasSSOConfiguration() {
+		return nil
+	}
+
+	var missing []string
+	if len(cfg.SSOAccountID) == 0 {
+		missing = append(missing, ssoAccountIDKey)
+	}
+
+	if len(cfg.SSORegion) == 0 {
+		missing = append(missing, ssoRegionKey)
+	}
+
+	if len(cfg.SSORoleName) == 0 {
+		missing = append(missing, ssoRoleNameKey)
+	}
+
+	if len(cfg.SSOStartURL) == 0 {
+		missing = append(missing, ssoStartURL)
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			cfg.Profile, strings.Join(missing, ", "))
+	}
+
+	return nil
+}
+
+func (cfg *sharedConfig) hasCredentials() bool {
+	switch {
+	case len(cfg.SourceProfileName) != 0:
+	case len(cfg.CredentialSource) != 0:
+	case len(cfg.CredentialProcess) != 0:
+	case len(cfg.WebIdentityTokenFile) != 0:
+	case cfg.hasSSOConfiguration():
+	case cfg.Creds.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (cfg *sharedConfig) clearCredentialOptions() {
+	cfg.CredentialSource = ""
+	cfg.CredentialProcess = ""
+	cfg.WebIdentityTokenFile = ""
+	cfg.Creds = credentials.Value{}
+	cfg.SSOAccountID = ""
+	cfg.SSORegion = ""
+	cfg.SSORoleName = ""
+	cfg.SSOStartURL = ""
+}
+
+func (cfg *sharedConfig) clearAssumeRoleOptions() {
+	cfg.RoleARN = ""
+	cfg.ExternalID = ""
+	cfg.MFASerial = ""
+	cfg.RoleSessionName = ""
+	cfg.SourceProfileName = ""
+}
+
+func (cfg *sharedConfig) hasSSOConfiguration() bool {
+	switch {
+	case len(cfg.SSOAccountID) != 0:
+	case len(cfg.SSORegion) != 0:
+	case len(cfg.SSORoleName) != 0:
+	case len(cfg.SSOStartURL) != 0:
+	default:
+		return false
+	}
+	return true
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = new(bool)
+	**dst = section.Bool(key)
+}
+
+// SharedConfigLoadError is an error for when the shared config file failed
+// to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
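+//
+// A hedged illustration of the rendered message (the exact layout comes
+// from awserr.SprintError; path is a placeholder):
+//
+//	SharedConfigLoadError: failed to load config file, /home/user/.aws/config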
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN       string
+	SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf(
+		"failed to load assume role for %s, source profile %s has no shared credentials",
+		e.RoleARN, e.SourceProfile,
+	)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Code is the short id of the error.
+func (e CredentialRequiresARNError) Code() string {
+	return "CredentialRequiresARNError"
+}
+
+// Message is the description of the error
+func (e CredentialRequiresARNError) Message() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e CredentialRequiresARNError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the
+// section, if the key is present in the section.
+func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	if section.Bool(key) {
+		*dst = endpoints.DualStackEndpointStateEnabled
+	} else {
+		*dst = endpoints.DualStackEndpointStateDisabled
+	}
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the
+// section, if the key is present in the section.
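+//
+// Illustratively:
+//
+//	use_fips_endpoint=true  -> endpoints.FIPSEndpointStateEnabled
+//	use_fips_endpoint=false -> endpoints.FIPSEndpointStateDisabled
+//	key absent              -> dst left unchanged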
+func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	if section.Bool(key) {
+		*dst = endpoints.FIPSEndpointStateEnabled
+	} else {
+		*dst = endpoints.FIPSEndpointStateDisabled
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000..9937538
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+	"github.com/aws/aws-sdk-go/internal/strings"
+)
+
+// rules houses the set of rules needed for validation of a string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply checks whether
+// a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rule applies to
+// the value. Nested rules are supported.
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// allowList is a generic rule for allow listing
+type allowList struct {
+	rule
+}
+
+// IsValid for allow list checks if the value is within the allow list
+func (w allowList) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
+	rule
+}
+
+// IsValid for exclude list checks if the value is within the exclude list
+func (b excludeList) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 0000000..6aa2ed2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable unsigned payloads by setting the signer's
+// UnsignedPayload field to true.
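+//
+// Illustrative usage (the creds variable is a placeholder):
+//
+//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload)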
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 0000000..cf672b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,14 @@ +//go:build !go1.7 +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 0000000..21fe74e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,14 @@ +//go:build go1.7 +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 0000000..02cbd97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 0000000..7711ec7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,25 @@ +//go:build go1.5 +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + 
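		// EscapedPath (available since Go 1.5, which the build tag above
		// guarantees) preserves the original percent-encoding of the path,
		// which the SigV4 canonical URI requires.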
uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 0000000..4d78162
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,854 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI-escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing; this will help prevent
+// signature validation errors. It can be done by setting the URL.Opaque or
+// URL.RawPath. The SDK will use URL.Opaque first and then call
+// URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP/2 requests using absolute URLs in the
+// HTTP message. URL.Opaque generally will force Go to make requests with
+// absolute URLs. URL.RawPath does not do this, but RawPath must be a valid
+// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
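+//
+// A minimal standalone sketch (the credentials and endpoint are illustrative
+// placeholders, not working values):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	signer := v4.NewSigner(creds)
+//
+//	req, _ := http.NewRequest("GET", "https://sqs.us-west-2.amazonaws.com/", nil)
+//	_, err := signer.Sign(req, nil, "sqs", "us-west-2", time.Now())
+//	if err != nil {
+//		// handle the signing failure
+//	}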
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	authorizationHeader     = "Authorization"
+	authHeaderSignatureElem = "Signature="
+	signatureQueryKey       = "X-Amz-Signature"
+
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+	awsV4Request     = "aws4_request"
+
+	// emptyStringSHA256 is the SHA256 of an empty string
+	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+	excludeList{
+		mapRule{
+			authorizationHeader: struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+		},
+	},
+}
+
+// requiredSignedHeaders is an allow list for building canonical headers.
+var requiredSignedHeaders = rules{
+	allowList{
+		mapRule{
+			"Cache-Control": struct{}{},
+			"Content-Disposition": struct{}{},
+			"Content-Encoding": struct{}{},
+			"Content-Language": struct{}{},
+			"Content-Md5": struct{}{},
+			"Content-Type": struct{}{},
+			"Expires": struct{}{},
+			"If-Match": struct{}{},
+			"If-Modified-Since": struct{}{},
+			"If-None-Match": struct{}{},
+			"If-Unmodified-Since": struct{}{},
+			"Range": struct{}{},
+			"X-Amz-Acl": struct{}{},
+			"X-Amz-Copy-Source": struct{}{},
+			"X-Amz-Copy-Source-If-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+			"X-Amz-Copy-Source-If-None-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+			"X-Amz-Grant-Full-control": struct{}{},
+			"X-Amz-Grant-Read": struct{}{},
+			"X-Amz-Grant-Read-Acp": struct{}{},
+			"X-Amz-Grant-Write": struct{}{},
+			"X-Amz-Grant-Write-Acp": struct{}{},
+			"X-Amz-Metadata-Directive": struct{}{},
+			"X-Amz-Mfa": struct{}{},
+			"X-Amz-Request-Payer": struct{}{},
+			"X-Amz-Server-Side-Encryption": struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+			"X-Amz-Storage-Class": struct{}{},
+			"X-Amz-Tagging": struct{}{},
+			"X-Amz-Website-Redirect-Location": struct{}{},
+			"X-Amz-Content-Sha256": struct{}{},
+		},
+	},
+	patterns{"X-Amz-Meta-"},
+	patterns{"X-Amz-Object-Lock-"},
+}
+
+// allowedQueryHoisting is an allow list for building query headers.
+var allowedQueryHoisting = inclusiveRules{
+	excludeList{requiredSignedHeaders},
+	patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+
+	// Sets the log level the signer should use when reporting information to
+	// the logger. If the logger is nil nothing will be logged.
+	// See aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. You need to ensure that the underlying data of the
+	// Body values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and
+// optional option values provided. If no options are provided the Signer
+// will use its default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one.
+// If a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will
+// not change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the signed request
+// will be valid after the signing time. This allows you to set when the
+// request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
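+//
+// A hedged presign sketch (bucket, key, and region are illustrative
+// placeholders):
+//
+//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// On success req.URL carries X-Amz-Signature and the related X-Amz-*
+//	// query parameters, and the URL can be shared as-is.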
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and needs to be redone because the request will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
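+//
+// Illustrative wiring (the SDK's clients already register this handler; svc
+// is a hypothetical service client):
+//
+//	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)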
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// If the request does not need to be signed, ignore signing it. This is
+	// the case when the AnonymousCredentials object is used.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body, since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on by the signer.
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
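+//
+// A hedged illustration (header layout abbreviated; "abc1" is a placeholder):
+//
+//	// Authorization: AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=abc1
+//	sig, err := v4.GetSignedRequestSignature(req)
+//	// sig is the hex-decoded form of "abc1"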
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerItems := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerItems[i] = "host:" + ctx.Request.Host + } else { + headerItems[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues := make([]string, len(ctx.SignedHeaderVals[k])) + for i, v := range ctx.SignedHeaderVals[k] { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + + strings.Join(headerValues, ",") + } + } + stripExcessSpaces(headerItems) + ctx.canonicalHeaders = strings.Join(headerItems, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && + (ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda") + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() {
+	ctx.Query.Del("X-Amz-Algorithm")
+	ctx.Query.Del("X-Amz-Signature")
+	ctx.Query.Del("X-Amz-Security-Token")
+	ctx.Query.Del("X-Amz-Date")
+	ctx.Query.Del("X-Amz-Expires")
+	ctx.Query.Del("X-Amz-Credential")
+	ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func hmacSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func hashSHA256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+	hash := sha256.New()
+	start, err := reader.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// ensure an error is returned if unable to seek back to the start
+		// of the payload.
+		_, err = reader.Seek(start, sdkio.SeekStart)
+	}()
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil), nil
+}
+
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
+
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
+
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
+
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
+				}
+				spaces++
+			} else {
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
+			}
+		}
+
+		vals[i] = string(buf[:m])
+	}
+}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+	return strings.Join([]string{
+		formatShortTime(dt),
+		region,
+		service,
+		awsV4Request,
+	}, "/")
+}
+
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+	kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+	kRegion := hmacSHA256(kDate, []byte(region))
+	kService := hmacSHA256(kRegion, []byte(service))
+	signingKey := hmacSHA256(kService, []byte(awsV4Request))
+	return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+	return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+	return dt.UTC().Format(timeFormat)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..98751ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,264 @@
+package aws
+
+import (
+	"io"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..98751ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,264 @@
+package aws
+
+import (
+ "io"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation from being retried in the case
+// of network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3 Upload
+// manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader can be seeked. An
+// io.Reader might not actually be seekable if it is wrapped in a
+// ReaderSeekerCloser.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ return v.IsSeeker()
+ case *ReaderSeekerCloser:
+ return v.IsSeeker()
+ case io.ReadSeeker:
+ return true
+ default:
+ return false
+ }
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the underlying value is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the underlying value is not an io.Seeker, nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return seekerLen(s)
+ }
+
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. 
+ _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 0000000..fed561b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,13 @@ +//go:build go1.8 +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 0000000..95282db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,30 @@ +//go:build !go1.8 +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 0000000..38255e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.44.66" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 0000000..3653453 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,41 @@ +//go:build !go1.7 +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 0000000..e83a998 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
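+//
+// For illustration, nodes are built and linked with the helpers above
+// (package-internal sketch; sectionTok and valueTok are hypothetical tokens):
+//
+// sect := newASTWithRootToken(ASTKindSectionStatement, sectionTok)
+// sect.AppendChild(newExpression(valueTok))
+// children := sect.GetChildren() // contains the appended expression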
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 0000000..0895d53 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 0000000..0b76999 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 0000000..1e55bbd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,42 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " +// +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] +// +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
+// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 0000000..04345a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 0000000..91ba2a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 0000000..6e545b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,18 @@ +//go:build gofuzz +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 0000000..3b0ca7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. 
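+//
+// For illustration, a minimal use (the input is hypothetical):
+//
+// sections, err := ParseBytes([]byte("[default]\nregion = us-west-2\n"))
+// if err == nil {
+// section, _ := sections.GetSection("default")
+// _ = section.String("region") // "us-west-2"
+// }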
+func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 0000000..582c024 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. +func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
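+//
+// For illustration, lexing a line such as "key = value" (hypothetical input)
+// yields a literal, whitespace, operator, whitespace, then literal token:
+//
+// toks, _ := (&iniLexer{}).tokenize([]byte("key = value"))
+// // toks[0].Type() == TokenLit, toks[1].Type() == TokenWS,
+// // toks[2].Type() == TokenOp, toks[3].Type() == TokenWS,
+// // toks[4].Type() == TokenLit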
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 0000000..0ba3194 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,350 @@ +package ini + +import ( + "fmt" + "io" +) + +// ParseState represents the current state of the parser. +type ParseState uint + +// State enums for the parse table +const ( + InvalidState ParseState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]ParseState{ + ASTKindStart: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: { + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, + }, + ASTKindStatement: { + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: { + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: { + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + 
TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + root.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
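+//
+// For illustration (hypothetical raw token text):
+//
+// before: "  dev  " -> after: "dev"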
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 0000000..34a481a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,340 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isCaselessLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
+func isCaselessLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != unicode.ToLower(have[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a rune slice form a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
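+//
+// For illustration:
+//
+// isNumberValue([]rune("0x1F")) // true, hex
+// isNumberValue([]rune("1e-4")) // true, scientific notation
+// isNumberValue([]rune("123abc")) // false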
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = isCaselessLitValue(runesTrue, v.raw) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 0000000..e52ac39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 0000000..a45c0bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat 
int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 0000000..8a84c7c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
new file mode 100644 index 0000000..4572870 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 0000000..7f01cf7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
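+//
+// For illustration, Push and Pop pair LIFO-style (package-internal sketch):
+//
+// stack := newParseStack(3, 8)
+// stack.Push(Start)
+// k := stack.Pop() // k.Kind == ASTKindStart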
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 0000000..f82095b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 0000000..da7a404 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 0000000..18f3fe8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
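+//
+// For illustration (package-internal sketch; tok is a hypothetical section
+// name token):
+//
+// stmt := newStatement() // Kind == ASTKindStatement, empty root
+// sect := newSectionStatement(tok) // carries the section name token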
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 0000000..b5480fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isCaselessLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
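+//
+// For illustration:
+//
+// getNegativeNumber([]rune("-123x")) // 4, the length of "-123"
+// getNegativeNumber([]rune("123")) // 0, no leading '-'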
+func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 0000000..081cf43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,169 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... +func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. + // If the token is not either a literal or one of the token types that identifies those four additional + // tokens then error. + if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... 
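+//
+// For illustration, walking a parsed tree populates Sections (a sketch; tree
+// comes from ParseAST):
+//
+// v := NewDefaultVisitor()
+// if err := Walk(tree, v); err == nil {
+// section, _ := v.Sections.GetSection("default")
+// _ = section
+// }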
+func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 0000000..99915f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. +func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 0000000..7ffb4ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. 
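+//
+// For illustration:
+//
+// isWhitespace(' ') // true
+// isWhitespace('\t') // true
+// isWhitespace('\n') // false, newlines become their own token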
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 0000000..bf18031
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported access point resource format:
+//	- Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+//	- example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+//
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if len(a.Region) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
new file mode 100644
index 0000000..216c4ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
@@ -0,0 +1,94 @@
+package arn
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
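//
// Editor's note: a hedged usage sketch, not part of the vendored source.
// SplitResource is defined just below, and the access point parser appears
// earlier in this file set:
//
//	res, err := ParseResource(
//		"arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint",
//		func(a arn.ARN) (Resource, error) {
//			parts := SplitResource(a.Resource) // ["accesspoint", "myaccesspoint"]
//			ap, err := ParseAccessPointResource(a, parts[1:])
//			return ap, err
//		})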
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") { + return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 0000000..1e10f8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. 
Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. +type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. +// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 0000000..513154c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 0000000..4290ff6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,202 @@ +package s3shared + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError struct { + message string + resource arn.Resource + 
origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +// +// Deprecated: FIPS will not appear in the ARN region component. +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: 
clientRegion,
+	}
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for fips but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "use of ARN is not supported when client or request is configured for FIPS",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Dual-stack but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go
new file mode 100644
index 0000000..ef43d6c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go
@@ -0,0 +1,45 @@
+package s3shared
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	awsarn "github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+// ResourceRequest represents the request and arn resource
+type ResourceRequest struct {
+	Resource arn.Resource
+	Request  *request.Request
+}
+
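// Editor's note: a hedged sketch, not part of the vendored source, of how the
// configuration errors above are typically consumed; res is a previously
// parsed arn.Resource and the region/partition arguments are placeholders:
//
//	var err error = s3shared.NewClientRegionMismatchError(res, "aws", "us-east-1", nil)
//	if aerr, ok := err.(awserr.Error); ok {
//		fmt.Println(aerr.Code()) // ConfigurationError
//	}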
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+// IsCrossPartition returns true if the client is configured for a partition
+// other than the one the resource ARN resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different than client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsCrossRegion returns true if request signing region is not same as configured region
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+	return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 0000000..0b9b0df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
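// Editor's note: a hedged sketch, not part of the vendored source; within the
// SDK a caller can recover the extra S3 host ID by type asserting the request
// error:
//
//	if rf, ok := req.Error.(*s3err.RequestFailure); ok {
//		fmt.Println(rf.HostID()) // value of the X-Amz-Id-2 response header
//	}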
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (X-Amz-Id-2) from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 0000000..6c44398
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 0000000..037a998
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,11 @@
+//go:build !go1.7
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 0000000..65e7c60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,13 @@
+//go:build go1.7
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 0000000..a845287
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,16 @@
+//go:build go1.10
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 0000000..a3ae3e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,57 @@
+//go:build !go1.10
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
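		// (Editor's note) For these inputs the exponent is below the bias, so
		// |x| < 1: the result is ±0, except when e == bias-1, which means
		// 0.5 <= |x| < 1 and the value rounds away from zero to ±1.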
+ bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 0000000..0c9802d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 0000000..4bae66c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,12 @@ +//go:build go1.6 +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 0000000..3a6ab88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,25 @@ +//go:build !go1.6 +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 0000000..38ea61a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
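	// (Editor's note) For example, PathJoin("a", "b/") returns "a/b/", where
	// path.Join alone would have cleaned the result to "a/b".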
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 0000000..7da8a49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 0000000..ebcbc2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 0000000..d008ae2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 0000000..14ad0c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
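//
// Editor's note: a hedged usage sketch, not part of the vendored source;
// fetchUser is a hypothetical loader:
//
//	var g singleflight.Group
//	v, err, shared := g.Do("user:42", func() (interface{}, error) {
//		return fetchUser(42) // concurrent callers share one fetch per key
//	})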
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 0000000..e045f38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
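	// (Editor's note) Equivalent standalone computation for an in-memory body:
	//   sum := md5.Sum(body)
	//   enc := base64.StdEncoding.EncodeToString(sum[:])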
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 0000000..1510549 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err 
+ case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 0000000..4743393 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,216 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ + r: r, + } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. 
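//
// Editor's note: a hedged usage sketch, not part of the vendored source;
// conn is any io.Reader carrying an event stream:
//
//	msg, err := eventstream.Decode(conn, nil)
//	if err != nil {
//		// handle decode / checksum error
//	}
//	fmt.Printf("%d headers, %d byte payload\n", len(msg.Headers), len(msg.Payload))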
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 0000000..ffade3b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,162 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + logger aws.Logger + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) (err error) { + e.headersBuf.Reset() + + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
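//
// Editor's note: a hedged usage sketch, not part of the vendored source; the
// header name here is illustrative only:
//
//	var headers eventstream.Headers
//	headers.Set("example-header", eventstream.StringValue("value"))
//	var buf bytes.Buffer
//	err := eventstream.EncodeHeaders(&buf, headers)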
+func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 0000000..5481ef3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 0000000..0a63340 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,81 @@ +package eventstreamapi + +import ( + "fmt" + "sync" +) + +// InputWriterCloseErrorCode is used to denote an error occurred +// while closing the event stream input writer. +const InputWriterCloseErrorCode = "EventStreamInputWriterCloseError" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. 
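//
// Editor's note: a hedged usage sketch, not part of the vendored source:
//
//	oe := eventstreamapi.NewOnceError()
//	oe.SetError(errors.New("stream failed")) // only the first non-nil error is kept
//	<-oe.ErrorSet()                          // channel is closed once an error is set
//	fmt.Println(oe.Err())                    // "stream failed"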
+func (e *OnceError) ErrorSet() <-chan struct{} {
+	return e.ch
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
new file mode 100644
index 0000000..0e4aa42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
@@ -0,0 +1,173 @@
+package eventstreamapi
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling an EventStream
+// message into an SDK type.
+type Unmarshaler interface {
+	UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+	decoder *eventstream.Decoder
+
+	unmarshalerForEventType func(string) (Unmarshaler, error)
+	payloadUnmarshaler      protocol.PayloadUnmarshaler
+
+	payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+	decoder *eventstream.Decoder,
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+	return &EventReader{
+		decoder:                 decoder,
+		payloadUnmarshaler:      payloadUnmarshaler,
+		unmarshalerForEventType: unmarshalerForEventType,
+		payloadBuf:              make([]byte, 10*1024),
+	}
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+	msg, err := r.decoder.Decode(r.payloadBuf)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Reclaim payload buffer for next message read.
+		r.payloadBuf = msg.Payload[0:0]
+	}()
+
+	typ, err := GetHeaderString(msg, MessageTypeHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	switch typ {
+	case EventMessageType:
+		return r.unmarshalEventMessage(msg)
+	case ExceptionMessageType:
+		return nil, r.unmarshalEventException(msg)
+	case ErrorMessageType:
+		return nil, r.unmarshalErrorMessage(msg)
+	default:
+		return nil, &UnknownMessageTypeError{
+			Type: typ, Message: msg.Clone(),
+		}
+	}
+}
+
+// UnknownMessageTypeError provides an error when a message is received from
+// the stream, but the reader is unable to determine what kind of message it is.
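//
// Editor's note: a hedged handling sketch, not part of the vendored source;
// r is an *EventReader:
//
//	event, err := r.ReadEvent()
//	if ue, ok := err.(*UnknownMessageTypeError); ok {
//		log.Printf("skipping unknown message type %q", ue.Type)
//	}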
+type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. +func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 0000000..e46b8ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+
+	// Message Error
+	ErrorCodeHeader    = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
new file mode 100644
index 0000000..3a7ba5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go
@@ -0,0 +1,123 @@
+package eventstreamapi
+
+import (
+	"bytes"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+var timeNow = time.Now
+
+// StreamSigner defines an interface for signing event stream payloads.
+type StreamSigner interface {
+	GetSignature(headers, payload []byte, date time.Time) ([]byte, error)
+}
+
+// SignEncoder envelopes event stream messages
+// into an event stream message payload with included
+// signature headers using the provided signer and encoder.
+type SignEncoder struct {
+	signer     StreamSigner
+	encoder    Encoder
+	bufEncoder *BufferEncoder
+
+	closeErr error
+	closed   bool
+}
+
+// NewSignEncoder returns a new SignEncoder using the provided stream signer and
+// event stream encoder.
+func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder {
+	// TODO: Need to pass down logging
+
+	return &SignEncoder{
+		signer:     signer,
+		encoder:    encoder,
+		bufEncoder: NewBufferEncoder(),
+	}
+}
+
+// Close encodes a final event stream signing envelope with an empty event stream
+// payload. This final end-frame is used to mark the conclusion of the stream.
+func (s *SignEncoder) Close() error {
+	if s.closed {
+		return s.closeErr
+	}
+
+	if err := s.encode([]byte{}); err != nil {
+		if strings.Contains(err.Error(), "on closed pipe") {
+			return nil
+		}
+
+		s.closeErr = err
+		s.closed = true
+		return s.closeErr
+	}
+
+	return nil
+}
+
+// Encode takes the provided message and envelopes it with the required
+// signature.
+func (s *SignEncoder) Encode(msg eventstream.Message) error {
+	payload, err := s.bufEncoder.Encode(msg)
+	if err != nil {
+		return err
+	}
+
+	return s.encode(payload)
+}
+
+func (s SignEncoder) encode(payload []byte) error {
+	date := timeNow()
+
+	var msg eventstream.Message
+	msg.Headers.Set(DateHeader, eventstream.TimestampValue(date))
+	msg.Payload = payload
+
+	var headers bytes.Buffer
+	if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil {
+		return err
+	}
+
+	sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date)
+	if err != nil {
+		return err
+	}
+
+	msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig))
+
+	return s.encoder.Encode(msg)
+}
+
+// BufferEncoder is a utility that provides a buffered
+// event stream encoder
+type BufferEncoder struct {
+	encoder Encoder
+	buffer  *bytes.Buffer
+}
+
+// NewBufferEncoder returns a new BufferEncoder initialized
+// with a 1024 byte buffer.
+func NewBufferEncoder() *BufferEncoder {
+	buf := bytes.NewBuffer(make([]byte, 1024))
+	return &BufferEncoder{
+		encoder: eventstream.NewEncoder(buf),
+		buffer:  buf,
+	}
+}
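+
+// Signer sketch (editorial note, not part of the upstream SDK): each signed
+// frame carries only the :date header plus a signature computed over the
+// encoded headers and the wrapped payload. A toy StreamSigner satisfying the
+// interface above could look like the following (hmacKey is a placeholder;
+// assumes imports of crypto/hmac and crypto/sha256):
+//
+//	type hmacSigner struct{ hmacKey []byte }
+//
+//	func (s hmacSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
+//		mac := hmac.New(sha256.New, s.hmacKey)
+//		mac.Write(headers)
+//		mac.Write(payload)
+//		return mac.Sum(nil), nil
+//	}
+
+// Encode returns the encoded message as a byte slice.
+// The returned byte slice will be modified on the next encode call
+// and should not be held onto.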
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 0000000..433bb16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error {
+	if err := w.Err(); err != nil {
+		return err
+	}
+
+	resultCh := make(chan error)
+	wrapped := eventWriteAsyncReport{
+		Event:  event,
+		Result: resultCh,
+	}
+
+	select {
+	case w.stream <- wrapped:
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-w.done:
+		return fmt.Errorf("stream closed, unable to send event")
+	}
+
+	select {
+	case err := <-resultCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-w.done:
+		return fmt.Errorf("stream closed, unable to send event")
+	}
+}
+
+func (w *StreamWriter) writeStream() {
+	defer w.Close()
+
+	for {
+		select {
+		case wrapper := <-w.stream:
+			err := w.eventWriter.WriteEvent(wrapper.Event)
+			wrapper.ReportResult(w.done, err)
+			if err != nil {
+				w.err.SetError(err)
+				return
+			}
+
+		case <-w.done:
+			if err := w.streamCloser.Close(); err != nil {
+				w.err.SetError(err)
+			}
+			return
+		}
+	}
+}
+
+type eventWriteAsyncReport struct {
+	Event  Marshaler
+	Result chan<- error
+}
+
+func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool {
+	select {
+	case e.Result <- err:
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go
new file mode 100644
index 0000000..4bf2b27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+package eventstreamapi
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ApplyHTTPTransportFixes is a no-op for Go 1.18 and above.
+func ApplyHTTPTransportFixes(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go
new file mode 100644
index 0000000..2ee2c36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go
@@ -0,0 +1,19 @@
+//go:build !go1.18
+// +build !go1.18
+
+package eventstreamapi
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event
+// stream functionality. The Go 1.15 through 1.17 HTTP client could hang
+// forever when an HTTP/2 connection failed with a non-200 status code and an
+// error. Using Expect: 100-Continue allows the HTTP client to gracefully
+// handle the non-200 status code, and close the connection.
+//
+// This is a no-op for Go 1.18 and above.
+func ApplyHTTPTransportFixes(r *request.Request) {
+	r.Handlers.Sign.PushBack(func(r *request.Request) {
+		r.HTTPRequest.Header.Set("Expect", "100-Continue")
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
new file mode 100644
index 0000000..7d7a793
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
@@ -0,0 +1,63 @@
+package eventstreamapi
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
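+
+// Marshaler sketch (editorial note, not part of the upstream SDK): SDK
+// generated event types implement the Marshaler interface below; a
+// hand-rolled event would build an eventstream.Message itself. myEvent is a
+// hypothetical placeholder:
+//
+//	type myEvent struct{ Data []byte }
+//
+//	func (e *myEvent) MarshalEvent(pm protocol.PayloadMarshaler) (eventstream.Message, error) {
+//		var msg eventstream.Message
+//		msg.Payload = e.Data
+//		return msg, nil
+//	}
+
+// Marshaler provides a marshaling interface for event types to event stream
+// messages.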
+type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. +func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 0000000..f6f8c56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 0000000..9f509d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,506 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 0000000..f7427da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 0000000..1f1d27a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,104 @@ +package protocol + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
+var ValidateEndpointHostHandler = request.NamedHandler{
+	Name: "awssdk.protocol.ValidateEndpointHostHandler",
+	Fn: func(r *request.Request) {
+		err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+		if err != nil {
+			r.Error = err
+		}
+	},
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns an error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+	paramErrs := request.ErrInvalidParams{Context: opName}
+
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+
+		if err != nil {
+			paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
+		}
+
+		if !ValidPortNumber(port) {
+			paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			paramErrs.Add(request.NewErrParamFormat(
+				"endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+		}
+	}
+
+	if len(hostname) == 0 {
+		paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+	}
+
+	if len(hostname) > 255 {
+		paramErrs.Add(request.NewErrParamMaxLen(
+			"endpoint host", 255, host,
+		))
+	}
+
+	if paramErrs.Len() > 0 {
+		return paramErrs
+	}
+	return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 0000000..915b0fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+	builder := HostPrefixBuilder{
+		Prefix:   prefix,
+		LabelsFn: labelsFn,
+	}
+
+	return request.NamedHandler{
+		Name: HostPrefixHandlerName,
+		Fn:   builder.Build,
+	}
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+	Prefix   string
+	LabelsFn func() map[string]string
+}
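+
+// Expansion sketch (editorial note, not part of the upstream SDK): for a
+// modeled prefix of "{Bucket}." and a LabelsFn returning
+// map[string]string{"Bucket": "mybucket"}, Build below rewrites an endpoint
+// host of "service.region.amazonaws.com" into
+// "mybucket.service.region.amazonaws.com". The bucket and host names are
+// placeholders.
+
+// Build updates the passed in Request with the HostPrefix template expanded.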
+func (h HostPrefixBuilder) Build(r *request.Request) {
+	if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+		return
+	}
+
+	var labels map[string]string
+	if h.LabelsFn != nil {
+		labels = h.LabelsFn()
+	}
+
+	prefix := h.Prefix
+	for name, value := range labels {
+		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+	}
+
+	r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+	if len(r.HTTPRequest.Host) > 0 {
+		r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken sets the provided value to a new idempotency token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
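+
+// Worked example (editorial note, not part of the upstream SDK): the two bit
+// operations above force any random 16 bytes into a version 4, variant 1
+// UUID. With u[6] = 0x1f and u[8] = 0x2c:
+//
+//	u[6] = (0x1f | 0x40) & 0x4F // = 0x4F, high nibble forced to 4
+//	u[8] = (0x2c | 0x80) & 0xBF // = 0xAC, top two bits forced to 10
+//
+// so the rendered token has the shape XXXXXXXX-XXXX-4FXX-ACXX-XXXXXXXXXXXX.
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 0000000..2aec806
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,298 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.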
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + if !value.IsValid() && tag.Get("type") != "structure" { + return nil + } + } + + buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 0000000..8b2c9bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,304 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/big" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var millisecondsFloat = new(big.Float).SetInt64(1e3) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. 
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 0000000..d9aa271 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,87 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + // Always serialize the body, don't suppress it. + req.SetBufferBody(buf) + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+
+	// Only set the content type if one is not already specified and a
+	// JSONVersion is specified.
+	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+		jsonVersion := req.ClientInfo.JSONVersion
+		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+				req.HTTPResponse.StatusCode,
+				req.RequestID,
+			)
+		}
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 0000000..c0c52e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,107 @@
+package jsonrpc
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors for both
+// typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	var buf bytes.Buffer
+	var jsonErr jsonErrorResponse
+	teeReader := io.TeeReader(resp.Body, &buf)
+	err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+	if err != nil {
+		return nil, err
+	}
+	body := ioutil.NopCloser(&buf)
+
+	// Code may be separated by hash(#), with the last element being the code
+	// used by the SDK.
+	codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+	code := codeParts[len(codeParts)-1]
+	msg := jsonErr.Message
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If the exception code is known, use the associated constructor to
+		// get a value for the exception that the JSON body can be
+		// unmarshaled into.
+		v := fn(respMeta)
+		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
+		if err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			req.HTTPResponse.StatusCode,
+			req.RequestID,
+		)
+		return
+	}
+
+	codes := strings.SplitN(jsonErr.Code, "#", 2)
+	req.Error = awserr.NewRequestFailure(
+		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+		req.HTTPResponse.StatusCode,
+		req.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"__type"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 0000000..776d110
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value.
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
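+
+// Round-trip sketch (editorial note, not part of the upstream SDK; errors
+// are elided here for brevity):
+//
+//	v := aws.JSONValue{"key": "value"}
+//	s, _ := protocol.EncodeJSONValue(v, protocol.Base64Escape)
+//	// s == "eyJrZXkiOiJ2YWx1ZSJ9", the base64 of {"key":"value"}
+//	v2, _ := protocol.DecodeJSONValue(s, protocol.Base64Escape)
+//	// v2 is deep-equal to v
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.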
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 0000000..0ea0647
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "PUT"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
new file mode 100644
index 0000000..9d521dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
@@ -0,0 +1,49 @@
+package protocol
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequireHTTPMinProtocol request handler is used to enforce that
+// the target endpoint supports the given major and minor HTTP protocol version.
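+//
+// A hedged usage sketch; svc stands in for any SDK client value you already
+// hold:
+//
+//	svc.Handlers.Unmarshal.PushBack(RequireHTTPMinProtocol{Major: 1, Minor: 1}.Handler)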
+type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 0000000..d40346a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000..75866d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
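+//
+// Editor's sketch of the resulting encoding (the Input type is hypothetical):
+//
+//	body := url.Values{}
+//	_ = Parse(body, &Input{Name: aws.String("foo")}, false)
+//	// body.Encode() == "Name=foo"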
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000..9231e95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. 
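+//
+// The XML body is decoded into r.Data from the "<OperationName>Result"
+// wrapper element, e.g. GetUserResult for a GetUser operation.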
+func Unmarshal(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	if r.DataFilled() {
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	}
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..831b011
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+	"encoding/xml"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+	xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	const svcUnavailableTagName = "ServiceUnavailableException"
+	const errorResponseTagName = "ErrorResponse"
+
+	switch start.Name.Local {
+	case svcUnavailableTagName:
+		e.Code = svcUnavailableTagName
+		e.Message = "service is unavailable"
+		return d.Skip()
+
+	case errorResponseTagName:
+		return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+	default:
+		return fmt.Errorf("unknown error response tag, %v", start)
+	}
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var respErr xmlResponseError
+	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	reqID := respErr.RequestID
+	if len(reqID) == 0 {
+		reqID = r.RequestID
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(respErr.Code, respErr.Message, nil),
+		r.HTTPResponse.StatusCode,
+		reqID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000..63f66af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,333 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
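+//
+// Struct members are bound to the HTTP request through `location` struct
+// tags such as "header", "headers", "uri", and "querystring"; the payload
+// member, if any, becomes the request body.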
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as string but + // required to be base64 encoded in request. 
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } + str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000..b54c99e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,54 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
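+//
+// For example (editor's illustration; Output and Details are hypothetical):
+//
+//	type Output struct {
+//		_       struct{} `payload:"Details"`
+//		Details *Details `type:"structure"`
+//	}
+//
+// PayloadMember(&Output{}) returns the Details member; non-structure
+// payloads such as blobs and streams yield nil.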
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +const nopayloadPayloadType = "nopayload" + +// PayloadType returns the type of a payload field member of i if there is one, +// or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + + if field, ok := v.Type().FieldByName("_"); ok { + if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { + return nopayloadPayloadType + } + + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000..cdef403 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,264 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
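+//
+// Editor's sketch of standalone use (resp is an *http.Response you already
+// hold, Output is a hypothetical shape with location-tagged fields):
+//
+//	var out Output
+//	err := UnmarshalResponse(resp, &out, false)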
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return fmt.Errorf("failed to decode JSONValue, %v", err) + } + header = string(b) + } + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 0000000..2e0e205 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. 
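+//
+// Location-tagged members are serialized by rest.Build; when the payload is
+// a structure, or there is none, the body is JSON encoded via jsonrpc.Build
+// with Content-Type defaulting to application/json.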
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
+			r.HTTPRequest.Header.Set("Content-Type", "application/json")
+		}
+		jsonrpc.Build(r)
+	}
+}
+
+// Unmarshal unmarshals a response body for the REST JSON protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		jsonrpc.Unmarshal(r)
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 0000000..d756d8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,134 @@
+package restjson
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	errorTypeHeader    = "X-Amzn-Errortype"
+	errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the
+// given mapping of exception names to error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	code := resp.Header.Get(errorTypeHeader)
+	msg := resp.Header.Get(errorMessageHeader)
+
+	body := resp.Body
+	if len(code) == 0 {
+		// If unable to get the code from the HTTP headers, parse the JSON
+		// message to determine what kind of exception this is.
+		var buf bytes.Buffer
+		var jsonErr jsonErrorResponse
+		teeReader := io.TeeReader(resp.Body, &buf)
+		err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+		if err != nil {
+			return nil, err
+		}
+
+		body = ioutil.NopCloser(&buf)
+		code = jsonErr.Code
+		msg = jsonErr.Message
+	}
+
+	// If the code has colon separators, remove them so it can be compared
+	// against modeled exception names.
+	code = strings.SplitN(code, ":", 2)[0]
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If the exception code is known, use the associated constructor to get a
+		// value for the exception that the JSON body can be unmarshaled into.
+		v := fn(respMeta)
+		if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
+			return nil, err
+		}
+
+		if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.restjson.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal response error", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	code := r.HTTPResponse.Header.Get(errorTypeHeader)
+	if code == "" {
+		code = jsonErr.Code
+	}
+	msg := r.HTTPResponse.Header.Get(errorMessageHeader)
+	if msg == "" {
+		msg = jsonErr.Message
+	}
+
+	code = strings.SplitN(code, ":", 2)[0]
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 0000000..b1ae364
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,79 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+	"bytes"
+	"encoding/xml"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
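+//
+// Location-tagged members are serialized by rest.Build; structure payloads
+// are XML encoded with xmlutil.BuildXML and set as the request body.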
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		var buf bytes.Buffer
+		err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to encode rest XML request", err),
+				0,
+				r.RequestID,
+			)
+			return
+		}
+		r.SetBufferBody(buf.Bytes())
+	}
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		defer r.HTTPResponse.Body.Close()
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to decode REST XML response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+	query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 0000000..d9a4e76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,134 @@
+package protocol
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+	RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+// Output time is intended to not contain decimals
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat                           = "Mon, 2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+
+	// This format is used for output time without seconds precision
+	RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+	// RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
+	ISO8601TimeFormat    = "2006-01-02T15:04:05.999999999Z"
+	iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999"
+
+	// This format is used for output time with fractional second precision up to milliseconds
+	ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
+)
+
+// IsKnownTimestampFormat reports whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
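+//
+// For example (editor's illustration):
+//
+//	FormatTime(ISO8601TimeFormatName, time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC))
+//	// => "2014-04-29T18:30:38Z"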
+func FormatTime(name string, t time.Time) string { + t = t.UTC().Truncate(time.Millisecond) + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: // Smithy HTTPDate format + return tryParse(value, + RFC822TimeFormat, + rfc822TimeFormatSingleDigitDay, + rfc822TimeFormatSingleDigitDayTwoDigitYear, + time.RFC850, + time.ANSIC, + ) + case ISO8601TimeFormatName: // Smithy DateTime format + return tryParse(value, + ISO8601TimeFormat, + iso8601TimeFormatNoZ, + time.RFC3339Nano, + time.RFC3339, + ) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} + +func tryParse(v string, formats ...string) (time.Time, error) { + var errs parseErrors + for _, f := range formats { + t, err := time.Parse(f, v) + if err != nil { + errs = append(errs, parseError{ + Format: f, + Err: err, + }) + continue + } + return t, nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs) +} + +type parseErrors []parseError + +func (es parseErrors) Error() string { + var s bytes.Buffer + for _, e := range es { + fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) + } + + return "parse errors:" + s.String() +} + +type parseError struct { + Format string + Err error +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 0000000..f614ef8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. 
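+// It is passed to the typed-error constructors registered with the protocol
+// unmarshalers (see UnmarshalTypedError and ErrorUnmarshaler).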
+type ResponseMetadata struct {
+	StatusCode int
+	RequestID  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 0000000..cc857f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+	unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
+type ErrorUnmarshaler interface {
+	UnmarshalError(*http.Response, ResponseMetadata) (error, error)
+}
+
+// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
+// initialized with the given error unmarshaler.
+func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
+	return &UnmarshalErrorHandler{
+		unmarshaler: unmarshaler,
+	}
+}
+
+// UnmarshalErrorHandlerName is the name of the named handler.
+const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError"
+
+// NamedHandler returns a NamedHandler for the unmarshaler using the set of
+// errors the unmarshaler was initialized for.
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: UnmarshalErrorHandlerName,
+		Fn:   u.UnmarshalError,
+	}
+}
+
+// UnmarshalError will attempt to unmarshal the API response's error message
+// into either a generic SDK error type, or a typed error corresponding to the
+// error's exception name.
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	respMeta := ResponseMetadata{
+		StatusCode: r.HTTPResponse.StatusCode,
+		RequestID:  r.RequestID,
+	}
+
+	v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal response error", err),
+			respMeta.StatusCode,
+			respMeta.RequestID,
+		)
+		return
+	}
+
+	r.Error = v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..2fbb93a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,317 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
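+//
+// Editor's sketch of direct use, mirroring what restxml.Build does:
+//
+//	var buf bytes.Buffer
+//	err := BuildXML(params, xml.NewEncoder(&buf))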
+func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
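+//
+// For example (illustrative), a string field tagged locationName:"Name"
+// renders as a <Name>...</Name> child of the struct's own element.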
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else if len(xname.Local) == 0 { + current.Text = str + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 0000000..c1a5118 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp 
:= strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000..107c053 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,299 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
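+//
+// For example, a hypothetical shape (not a type from this SDK) such as
+//
+//    type Person struct {
+//        FullName *string `locationName:"fullName" type:"string"`
+//    }
+//
+// would have its FullName field filled from the <fullName> child element of
+// the node being parsed.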
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		return parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			// propagate parse failures instead of silently dropping them
+			if err := parse(valueR, value, ""); err != nil {
+				return err
+			}
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	switch r.Interface().(type) {
+	case *string:
+		r.Set(reflect.ValueOf(&node.Text))
+		return nil
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(b))
+	case *bool:
+		v, err := strconv.ParseBool(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *int64:
+		v, err := strconv.ParseInt(node.Text, 10, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *float64:
+		v, err := strconv.ParseFloat(node.Text, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *time.Time:
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		t, err := protocol.ParseTime(format, node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&t))
+	default:
+		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..c85b79f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,173 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+	Name     xml.Name              `json:",omitempty"`
+	Children map[string][]*XMLNode `json:",omitempty"`
+	Text     string                `json:",omitempty"`
+	Attr     []xml.Attr            `json:",omitempty"`
+
+	namespaces map[string]string
+	parent     *XMLNode
+}
+
+// textEncoder is a string type alias that implements the TextMarshaler interface.
+// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
+type textEncoder string
+
+func (t textEncoder) MarshalText() ([]byte, error) {
+	return []byte(t), nil
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+	return &XMLNode{
+		Name:     name,
+		Children: map[string][]*XMLNode{},
+		Attr:     []xml.Attr{},
+	}
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
+	if _, ok := n.Children[child.Name.Local]; !ok {
+		n.Children[child.Name.Local] = []*XMLNode{}
+	}
+	n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values.
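+//
+// Typical use decodes an entire document by starting with a nil start
+// element (sketch; r is any io.Reader holding XML):
+//
+//    node, err := XMLToStruct(xml.NewDecoder(r), nil)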
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + startElement := xml.StartElement{Name: node.Name, Attr: attrs} + + if node.Text != "" { + e.EncodeElement(textEncoder(node.Text), startElement) + return e.Flush() + } + + e.EncodeToken(startElement) + + if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(startElement.End()) + + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 0000000..13a367d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,42213 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// action and ensure that the parts list is empty. +// +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
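+//
+// For example, a caller might abort an in-progress upload as follows. The
+// bucket, key, and upload ID below are placeholder values, not output from
+// this SDK:
+//
+//    // Sketch: abort an upload created earlier with CreateMultipartUpload.
+//    _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("my-key"),
+//        UploadId: aws.String(uploadID),
+//    })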
+// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMultipartUploadRequest method. 
+// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this action to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This action +// concatenates the parts that you provide in the list. For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// You cannot use Content-Type: application/x-www-form-urlencoded with Complete +// Multipart Upload requests. Also, if you do not provide a Content-Type header, +// CompleteMultipartUpload returns a 200 OK response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// CompleteMultipartUpload has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 
400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyObjectRequest method. 
+// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy (UploadPartCopy) API. For more information, see +// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// action starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (http://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). 
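+//
+// For example, the metadata-replacement behavior described below maps to the
+// MetadataDirective field of CopyObjectInput (all values here are
+// illustrative):
+//
+//    _, err := svc.CopyObject(&s3.CopyObjectInput{
+//        Bucket:            aws.String("destination-bucket"),
+//        Key:               aws.String("destination-key"),
+//        CopySource:        aws.String("source-bucket/source-key"),
+//        MetadataDirective: aws.String("REPLACE"),
+//        Metadata:          map[string]*string{"project": aws.String("demo")},
+//    })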
+// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition +// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Server-side encryption +// +// When you perform a CopyObject operation, you can optionally use the appropriate +// encryption-related headers to encrypt the object using server-side encryption +// with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a +// customer-provided encryption key. With server-side encryption, Amazon S3 +// encrypts your data as it writes it to disks in its data centers and decrypts +// the data when you access it. For more information about server-side encryption, +// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the +// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon S3 User Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// Amazon Web Services accounts or to predefined groups defined by Amazon S3. +// These permissions are then added to the ACL on the object. For more information, +// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). 
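+//
+// For example, these grants map to fields such as ACL and GrantRead on
+// CopyObjectInput (the account ID below is a placeholder):
+//
+//    input.ACL = aws.String("bucket-owner-full-control")
+//    input.GrantRead = aws.String(`id="111122223333"`)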
+// +// If the bucket that you're copying objects to uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify +// an ACL or PUT requests that specify bucket owner full control ACLs, such +// as the bucket-owner-full-control canned ACL or an equivalent form of this +// ACL expressed in the XML format. +// +// For more information, see Controlling ownership of objects and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// If your bucket uses the bucket owner enforced setting for Object Ownership, +// all objects written to the bucket by any account will be owned by the bucket +// owner. +// +// Checksums +// +// When copying an object, if it has a checksum, that checksum will be copied +// to the new object by default. When you copy the object over, you may optionally +// specify a different checksum algorithm to use with the x-amz-checksum-algorithm +// header. +// +// Storage Class Options +// +// You can use the CopyObject action to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 User Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// +// The following operations are related to CopyObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. 
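+//
+// A sketch of handling that error code with a runtime type assertion (names
+// illustrative):
+//
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeObjectNotInActiveTierError {
+//        // Restore the source object from S3 Glacier, then retry the copy.
+//    }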
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new S3 bucket. To create a bucket, you must register with Amazon +// S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. +// +// Not every string is an acceptable bucket name. For information about bucket +// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). +// +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. 
For more information, see +// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// Access control lists (ACLs) +// +// When creating a bucket using this operation, you can optionally configure +// the bucket ACL to specify the accounts or groups that should be granted specific +// permissions on the bucket. +// +// If your CreateBucket request sets bucket owner enforced for S3 Object Ownership +// and specifies a bucket ACL that provides access to an external Amazon Web +// Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership +// error code. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access control list (ACL) overview +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an Amazon Web Services account uri – if you are granting permissions +// to a predefined group emailAddress – if the value specified is the email +// address of an Amazon Web Services account Using email addresses to specify +// a grantee is only supported in the following Amazon Web Services Regions: +// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified +// by account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. 
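+//
+// For example, a Region-pinned bucket with a canned ACL can be requested
+// through the input shape (illustrative values):
+//
+//    _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//        ACL:    aws.String("private"),
+//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//            LocationConstraint: aws.String("eu-west-1"),
+//        },
+//    })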
+//
+// Permissions
+//
+// In addition to s3:CreateBucket, the following permissions are required when
+// your CreateBucket includes specific headers:
+//
+//    * ACLs - If your CreateBucket request specifies ACL permissions and the
+//    ACL is public-read, public-read-write, authenticated-read, or if you specify
+//    access permissions explicitly through any other ACL, both s3:CreateBucket
+//    and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket
+//    request is private or doesn't specify any ACLs, only s3:CreateBucket permission
+//    is needed.
+//
+//    * Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket
+//    request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning
+//    permissions are required.
+//
+//    * S3 Object Ownership - If your CreateBucket request includes the
+//    x-amz-object-ownership header, s3:PutBucketOwnershipControls permission
+//    is required.
+//
+// The following operations are related to CreateBucket:
+//
+//    * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+//    * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
+//   The requested bucket name is not available. The bucket namespace is shared
+//   by all users of the system. Select a different name and try again.
+//
+//   * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+//   The bucket you tried to create already exists, and you own it. Amazon S3
+//   returns this error in all Amazon Web Services Regions except in the North
+//   Virginia Region. For legacy compatibility, if you re-create an existing bucket
+//   that you already own in the North Virginia Region, Amazon S3 returns 200
+//   OK and resets the bucket access control lists (ACLs).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+	req, out := c.CreateBucketRequest(input)
+	return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+	req, out := c.CreateBucketRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CreateMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. +// +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (Amazon Web Services +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. 
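+//
+// For example, initiating an upload and keeping the upload ID for the
+// subsequent part requests (illustrative values):
+//
+//    resp, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-large-object"),
+//    })
+//    // resp.UploadId is then supplied to every UploadPart call and to the
+//    // final CompleteMultipartUpload or AbortMultipartUpload request.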
+// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. +// If you choose to provide your own encryption key, the request headers you +// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload. +// +// To perform a multipart upload with encryption using an Amazon Web Services +// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// actions on the key. These permissions are required because Amazon S3 must +// decrypt and read data from the encrypted file parts before it completes the +// multipart upload. For more information, see Multipart upload API and permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// in the Amazon S3 User Guide. +// +// If your Identity and Access Management (IAM) user or role is in the same +// Amazon Web Services account as the KMS key, then you must have these permissions +// on the key policy. If your IAM user or role belongs to a different account +// than the key, then you must have the permissions on both the key policy and +// your IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use Amazon Web Services managed encryption keys or provide your own encryption +// key. +// +// * Use encryption keys managed by Amazon S3 or customer managed key stored +// in Amazon Web Services Key Management Service (Amazon Web Services KMS) +// – If you want Amazon Web Services to manage the keys used to encrypt +// data, specify the following headers in the request. 
x-amz-server-side-encryption +// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context +// If you specify x-amz-server-side-encryption:aws:kms, but don't provide +// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon +// Web Services managed key in Amazon Web Services KMS to protect the data. +// All GET and PUT requests for an object protected by Amazon Web Services +// KMS fail if you don't make them with SSL or by using SigV4. For more information +// about server-side encryption with KMS key (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// Amazon Web Services accounts or to predefined groups defined by Amazon S3. +// These permissions are then added to the access control list (ACL) on the +// object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific Amazon Web Services accounts or groups, use the +// following headers. Each header maps to specific permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// Amazon Web Services account uri – if you are granting permissions to +// a predefined group emailAddress – if the value specified is the email +// address of an Amazon Web Services account Using email addresses to specify +// a grantee is only supported in the following Amazon Web Services Regions: +// US East (N. Virginia) US West (N. 
California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified +// by account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. +// +// Related Resources: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. 
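+//
+// As an illustrative sketch only (sess, the timeout, and the bucket name
+// below are assumed placeholders, not values defined by this SDK), a
+// context-aware call could look like:
+//
+//    svc := s3.New(sess)
+//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteBucketCorsWithContext(ctx, &s3.DeleteBucketCorsInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err != nil {
+//        // handle the error (e.g. inspect it as an awserr.Error)
+//    }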
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// This implementation of the DELETE action removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. 
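+//
+// As a rough usage sketch (the bucket name and configuration ID are
+// placeholders, and svc stands for an already-constructed *s3.S3 client):
+//
+//    _, err := svc.DeleteBucketIntelligentTieringConfiguration(&s3.DeleteBucketIntelligentTieringConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        Id:     aws.String("ExampleConfigId"),
+//    })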
+// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. +// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
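+//
+// For instance, the returned error can be inspected with a type assertion
+// (a sketch, not generated documentation; svc is an assumed *s3.S3 client
+// and the identifiers are placeholders):
+//
+//    _, err := svc.DeleteBucketInventoryConfiguration(&s3.DeleteBucketInventoryConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        Id:     aws.String("ExampleInventoryId"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }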
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. 
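+//
+// A minimal sketch of deleting a bucket's lifecycle configuration (the
+// bucket name is a placeholder; sess is an assumed, preconfigured session):
+//
+//    svc := s3.New(sess)
+//    _, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
+//        Bucket: aws.String("example-bucket"),
+//    })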
+// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. 
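+//
+// Sketch (svc, the bucket name, and the metrics configuration ID are
+// assumed placeholders):
+//
+//    _, err := svc.DeleteBucketMetricsConfigurationWithContext(aws.BackgroundContext(),
+//        &s3.DeleteBucketMetricsConfigurationInput{
+//            Bucket: aws.String("example-bucket"),
+//            Id:     aws.String("ExampleMetricsId"),
+//        })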
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. +// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to DeleteBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. 
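+//
+// Sketch (placeholder bucket name; svc is an assumed *s3.S3 client):
+//
+//    _, err := svc.DeleteBucketOwnershipControls(&s3.DeleteBucketOwnershipControlsInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//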
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must have the DeleteBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. 
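+//
+// As an illustration (placeholder bucket name; sess is an assumed,
+// preconfigured session), deleting a bucket policy is a single call:
+//
+//    svc := s3.New(sess)
+//    _, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"),
+//    })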
+//
+// As a security precaution, the root user of the Amazon Web Services account
+// that owns a bucket can always use this operation, even if the policy explicitly
+// denies the root user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to DeleteBucketPolicy:
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+    req, out := c.DeleteBucketPolicyRequest(input)
+    return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+    req, out := c.DeleteBucketPolicyRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBucketReplicationRequest method.
+//    req, resp := client.DeleteBucketReplicationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+    op := &request.Operation{
+        Name:       opDeleteBucketReplication,
+        HTTPMethod: "DELETE",
+        HTTPPath:   "/{Bucket}?replication",
+    }
+
+    if input == nil {
+        input = &DeleteBucketReplicationInput{}
+    }
+
+    output = &DeleteBucketReplicationOutput{}
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+    return
+}
+
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Deletes the replication configuration from the bucket.
+//
+// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
+// action. The bucket owner has these permissions by default and can grant them
+// to others. For more information about permissions, see Permissions Related
+// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// It can take a while for the deletion of a replication configuration to fully
+// propagate.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon S3 User Guide.
+//
+// The following operations are related to DeleteBucketReplication:
+//
+// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+    req, out := c.DeleteBucketReplicationRequest(input)
+    return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+    req, out := c.DeleteBucketReplicationRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
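+//
+// For illustration only (sess and the bucket name are assumed placeholders;
+// request.WithLogLevel is shown as one example of an additional request
+// option):
+//
+//    svc := s3.New(sess)
+//    _, err := svc.DeleteBucketTaggingWithContext(aws.BackgroundContext(),
+//        &s3.DeleteBucketTaggingInput{
+//            Bucket: aws.String("example-bucket"),
+//        }, request.WithLogLevel(aws.LogDebugWithHTTPBody))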
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. 
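+//
+// A minimal sketch (placeholder bucket name; svc is an assumed *s3.S3 client):
+//
+//    _, err := svc.DeleteBucketWebsite(&s3.DeleteBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//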
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects but will still respond +// that the command was successful. +// +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). 
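+//
+// As an illustrative sketch, permanently deleting one specific object version
+// might look like the following (bucket, key, and version ID are placeholders;
+// svc is an assumed *s3.S3 client):
+//
+//    _, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket:    aws.String("example-bucket"),
+//        Key:       aws.String("example-key"),
+//        VersionId: aws.String("example-version-id"),
+//    })
+//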
+// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+//
+// You can delete objects by explicitly calling DELETE Object or by configuring
+// their lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// to enable Amazon S3 to remove them for you. If you want to block users or
+// accounts from removing or deleting objects from your bucket, you must deny
+// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifecycleConfiguration
+// actions.
+//
+// The following action is related to DeleteObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ output = &DeleteObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This action enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, +// then this action provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete action and returns the +// result of that delete, success, or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The action supports two modes for the response: verbose and quiet. By default, +// the action uses verbose mode in which the response includes the result of +// deletion of each key in your request. In quiet mode the response includes +// only keys where the delete action encountered an error. For a successful +// deletion, the action does not return any information about the delete in +// the response body. +// +// When performing this action on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. 
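+//
+// A minimal quiet-mode sketch (svc is an assumed *s3.S3 client and the bucket
+// and keys are placeholders; the contentMd5Handler registered above adds the
+// required Content-MD5 header for you):
+//
+// _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+// Bucket: aws.String("example-bucket"),
+// Delete: &s3.Delete{
+// Objects: []*s3.ObjectIdentifier{
+// {Key: aws.String("a.txt")},
+// {Key: aws.String("b.txt")},
+// },
+// Quiet: aws.Bool(true), // only failed deletes are reported back
+// },
+// })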
+// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. 
+// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
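+ // The supplied options are applied to the request before it is sent, and
+ // Send uses the context set above for cancellation and deadlines.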
+ return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. 
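+//
+// A short sketch of reading the state (svc is an assumed *s3.S3 client and
+// the bucket name is a placeholder; Status is nil when no acceleration state
+// has ever been set):
+//
+// out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+// Bucket: aws.String("example-bucket"),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.Status)) // "Enabled", "Suspended", or ""
+// }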
+// +// Related Resources +// +// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This implementation of the GET action uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. 
If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
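+//
+// For example, a sketch with a deadline (the timeout value, svc client, and
+// input are placeholder assumptions; requires the context and time packages):
+//
+// ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+// defer cancel()
+// out, err := svc.GetBucketAnalyticsConfigurationWithContext(ctx, input)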
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information +// set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about CORS, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the default encryption configuration for an Amazon S3 bucket. If +// the bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. +// +// For information about the Amazon S3 default encryption feature, see Amazon +// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
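+//
+// For instance, the not-found case can be detected with such an assertion
+// (svc and the bucket name are placeholder assumptions):
+//
+// _, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
+// Bucket: aws.String("example-bucket"),
+// })
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
+// // the bucket has no default encryption configuration
+// }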
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. +// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. 
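+//
+// Sketch (svc, the bucket, and the inventory configuration ID are placeholder
+// assumptions; Id identifies which inventory configuration to fetch):
+//
+// out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
+// Bucket: aws.String("example-bucket"),
+// Id: aws.String("example-inventory-id"),
+// })
+// if err == nil {
+// fmt.Println(out.InventoryConfiguration)
+// }
+//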
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. 
For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are using a previous version +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
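+//
+// For example (svc and the bucket name are placeholder assumptions), the
+// special error above can be handled explicitly:
+//
+// out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+// Bucket: aws.String("example-bucket"),
+// })
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
+// // the bucket has no lifecycle configuration
+// } else if err == nil {
+// fmt.Println(out.Rules)
+// }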
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// To use this implementation of the operation, you must be the bucket owner. +// +// To use this API against an access point, provide the alias of the access +// point in place of the bucket name. 
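+//
+// A short sketch (svc and the bucket name are placeholder assumptions; buckets
+// in us-east-1 return an empty LocationConstraint):
+//
+// out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+// Bucket: aws.String("example-bucket"),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.LocationConstraint))
+// }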
+// +// The following operations are related to GetBucketLocation: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. 
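+//
+// A minimal usage sketch, assuming an initialized *s3.S3 client named svc and
+// a hypothetical bucket name:
+//
+// out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
+//     Bucket: aws.String("examplebucket"), // hypothetical bucket name
+// })
+// if err == nil && out.LoggingEnabled != nil {
+//     // LoggingEnabled is nil when logging is disabled on the bucket.
+//     fmt.Println(aws.StringValue(out.LoggingEnabled.TargetBucket))
+// }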
+// +// The following operations are related to GetBucketLogging: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. +// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. Note that this doesn't include the daily storage metrics. 
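+//
+// A minimal usage sketch, assuming an initialized *s3.S3 client named svc, a
+// hypothetical bucket name, and a hypothetical metrics configuration ID:
+//
+// out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
+//     Bucket: aws.String("examplebucket"),      // hypothetical bucket name
+//     Id:     aws.String("example-metrics-id"), // hypothetical configuration ID
+// })
+// if err == nil {
+//     fmt.Println(out.MetricsConfiguration)
+// }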
+// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following action is related to GetBucketNotification: +// +// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. +// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). +// +// The following operations are related to GetBucketOwnershipControls: +// +// * PutBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. 
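+//
+// A minimal usage sketch, assuming an initialized *s3.S3 client named svc and
+// a hypothetical bucket name:
+//
+// out, err := svc.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
+//     Bucket: aws.String("examplebucket"), // hypothetical bucket name
+// })
+// if err == nil {
+//     fmt.Println(out.OwnershipControls)
+// }
+//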
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the Amazon Web Services account that owns the bucket, +// the calling identity must have the GetBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the Amazon Web Services account +// that owns a bucket can always use this operation, even if the policy explicitly +// denies the root user the ability to perform this action. 
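+//
+// A minimal usage sketch, assuming an initialized *s3.S3 client named svc and
+// a hypothetical bucket name; the policy is returned as a JSON document in a
+// string:
+//
+// out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//     Bucket: aws.String("examplebucket"), // hypothetical bucket name
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.Policy))
+// }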
+// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following action is related to GetBucketPolicy: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. 
For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketReplication,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?replication",
+	}
+
+	if input == nil {
+		input = &GetBucketReplicationInput{}
+	}
+
+	output = &GetBucketReplicationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a put or delete of a replication configuration to
+// propagate to all Amazon S3 systems. Therefore, a get request soon after a
+// put or delete can return a stale result.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon S3 User Guide.
+//
+// This action requires permissions for the s3:GetReplicationConfiguration action.
+// For more information about permissions, see Using Bucket Policies and User
+// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// If you include the Filter element in a replication configuration, you must
+// also include the DeleteMarkerReplication and Priority elements. The response
+// also returns those elements.
+//
+// For information about GetBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
+//
+// The following operations are related to GetBucketReplication:
+//
+// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+	req, out := c.GetBucketReplicationRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+	req, out := c.GetBucketReplicationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSet Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
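+//
+// A minimal sketch of passing a context, assuming an initialized *s3.S3 client
+// named svc and a hypothetical bucket name; a standard library context
+// satisfies aws.Context:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// out, err := svc.GetBucketTaggingWithContext(ctx, &s3.GetBucketTaggingInput{
+//     Bucket: aws.String("examplebucket"), // hypothetical bucket name
+// })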
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+	req, out := c.GetBucketVersioningRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketWebsite,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?website",
+	}
+
+	if input == nil {
+		input = &GetBucketWebsiteInput{}
+	}
+
+	output = &GetBucketWebsiteOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
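+//
+// A minimal usage sketch, assuming an initialized *s3.S3 client named svc and
+// a hypothetical bucket name:
+//
+// out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//     Bucket: aws.String("examplebucket"), // hypothetical bucket name
+// })
+// if err == nil && out.IndexDocument != nil {
+//     fmt.Println(aws.StringValue(out.IndexDocument.Suffix))
+// }
+//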
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. 
For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// For more information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+//
+// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
+// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering
+// Deep Archive tiers, before you can retrieve the object you must first restore
+// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this action returns an InvalidObjectStateError error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption
+// keys (SSE-S3). If your object does use these types of keys, you’ll get
+// an HTTP 400 Bad Request error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have the relevant permission to read object tags, the response
+// also returns the x-amz-tagging-count header that provides the count of tags
+// associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the relevant read object (or version) permission for this operation.
+// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+// return an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+// an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET action returns the current version of an object. To return
+// a different version, use the versionId subresource.
+//
+// * If you supply a versionId, you need the s3:GetObjectVersion permission
+// to access a specific version of an object. If you request a specific version,
+// you do not need to have the s3:GetObject permission.
+//
+// * If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in
+// the response.
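+//
+// A minimal sketch of retrieving a specific version, assuming an initialized
+// *s3.S3 client named svc and hypothetical bucket, key, and version ID values:
+//
+// out, err := svc.GetObject(&s3.GetObjectInput{
+//     Bucket:    aws.String("examplebucket"),                   // hypothetical
+//     Key:       aws.String("photos/2006/February/sample.jpg"), // hypothetical
+//     VersionId: aws.String("example-version-id"),              // hypothetical
+// })
+// if err == nil {
+//     defer out.Body.Close() // Body is an io.ReadCloser; always close it
+// }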
+//
+// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+//
+// Overriding Response Header Values
+//
+// There are times when you want to override certain response header values
+// in a GET response. For example, you might override the Content-Disposition
+// response header value in your GET request.
+//
+// You can override values for a set of response headers using the following
+// query parameters. These response header values are sent only on a successful
+// request, that is, when status code 200 OK is returned. The set of headers
+// you can override using these parameters is a subset of the headers that Amazon
+// S3 accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values
+// in the GET response, you use the following request parameters.
+//
+// You must sign the request, either using an Authorization header or a presigned
+// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
+// request.
+//
+// * response-content-type
+//
+// * response-content-language
+//
+// * response-expires
+//
+// * response-cache-control
+//
+// * response-content-disposition
+//
+// * response-content-encoding
+//
+// Additional Considerations about Request Headers
+//
+// If both the If-Match and If-Unmodified-Since headers are present in the
+// request, and the If-Match condition evaluates to true while the If-Unmodified-Since
+// condition evaluates to false, then S3 returns 200 OK and the requested data.
+//
+// If both the If-None-Match and If-Modified-Since headers are present in
+// the request, and the If-None-Match condition evaluates to false while the
+// If-Modified-Since condition evaluates to true, then S3 returns a 304 Not
+// Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// The following operations are related to GetObject:
+//
+// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// * ErrCodeInvalidObjectState "InvalidObjectState"
+// Object is archived and inaccessible until restored.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+	req, out := c.GetObjectRequest(input)
+	return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests.
See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectAcl for more information on using the GetObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectAclRequest method.
+// req, resp := client.GetObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+ op := &request.Operation{
+ Name: opGetObjectAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectAclInput{}
+ }
+
+ output = &GetObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Returns the access control list (ACL) of an object. To use this operation,
+// you must have s3:GetObjectAcl permissions or READ_ACP access to the object.
+// For more information, see Mapping of ACL permissions and access policy permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping)
+// in the Amazon S3 User Guide.
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// Versioning
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId subresource.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// requests to read ACLs are still supported and return the bucket-owner-full-control
+// ACL with the owner being the account that created the bucket. For more information,
+// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide.
+//
+// The following operations are related to GetObjectAcl:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
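+//
+// A minimal usage sketch (editorial; the bucket and key are placeholders
+// and svc is an *s3.S3 client) that prints each grant on the object:
+//
+//    acl, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    if err == nil {
+//        for _, g := range acl.Grants {
+//            fmt.Println(g.Grantee, aws.StringValue(g.Permission))
+//        }
+//    }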
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAttributes = "GetObjectAttributes" + +// GetObjectAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAttributes for more information on using the GetObjectAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAttributesRequest method. +// req, resp := client.GetObjectAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *request.Request, output *GetObjectAttributesOutput) { + op := &request.Operation{ + Name: opGetObjectAttributes, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?attributes", + } + + if input == nil { + input = &GetObjectAttributesInput{} + } + + output = &GetObjectAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAttributes API operation for Amazon Simple Storage Service. +// +// Retrieves all the metadata from an object without returning the object itself. +// This action is useful if you're interested only in an object's metadata. +// To use GetObjectAttributes, you must have READ access to the object. +// +// GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, +// GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, +// and ListParts. All of the data returned with each of those individual calls +// can be returned with a single call to GetObjectAttributes. 
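+//
+// A minimal sketch of such a consolidated read (editorial; svc is an
+// *s3.S3 client, the bucket and key are placeholders, and the attribute
+// names are the string values of the ObjectAttributes enum):
+//
+//    attrs, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
+//        Bucket:           aws.String("my-bucket"),
+//        Key:              aws.String("my-key"),
+//        ObjectAttributes: aws.StringSlice([]string{"ETag", "ObjectSize"}),
+//    })
+//    if err == nil {
+//        fmt.Println(attrs) // only the requested attributes are populated
+//    }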
+// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// * Encryption request headers, such as x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management +// Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption +// keys (SSE-S3). If your object does use these types of keys, you'll get +// an HTTP 400 Bad Request error. +// +// * The last modified property in this case is the creation date of the +// object. +// +// Consider the following when using request headers: +// +// * If both of the If-Match and If-Unmodified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since +// condition evaluates to false. +// +// * If both of the If-None-Match and If-Modified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since +// condition evaluates to true. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// The permissions that you need to use this operation depend on whether the +// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion +// and s3:GetObjectVersionAttributes permissions for this operation. If the +// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes +// permissions. For more information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 Not Found ("no such key") error. +// +// * If you don't have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden ("access denied") error. 
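+//
+// One way to tell these two outcomes apart in Go (an editorial sketch;
+// err is assumed to come from a failed GetObjectAttributes call) is to
+// inspect the HTTP status via the awserr.RequestFailure interface:
+//
+//    if reqErr, ok := err.(awserr.RequestFailure); ok {
+//        switch reqErr.StatusCode() {
+//        case 404:
+//            // object missing, and the caller holds s3:ListBucket
+//        case 403:
+//            // access denied; the caller lacks s3:ListBucket
+//        }
+//    }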
+// +// The following actions are related to GetObjectAttributes: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// * GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// * GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// * GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributes(input *GetObjectAttributesInput) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + return out, req.Send() +} + +// GetObjectAttributesWithContext is the same as GetObjectAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAttributesWithContext(ctx aws.Context, input *GetObjectAttributesInput, opts ...request.Option) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLegalHoldRequest method. 
+// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Gets an object's current legal hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. 
+// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// The following action is related to GetObjectLockConfiguration: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
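+//
+// For instance (an editorial sketch; the header name and values are
+// placeholders, and svc is an *s3.S3 client), a custom header can be
+// attached to the request before it is sent:
+//
+//    req, out := svc.GetObjectRetentionRequest(&s3.GetObjectRetentionInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Header", "placeholder")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out.Retention) // populated once Send succeeds
+//    }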
+// +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTaggingRequest method. 
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ output = &GetObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the s3:GetObjectTagging
+// action. By default, the GET action returns information about the current version
+// of an object. For a versioned bucket, you can have multiple versions of an
+// object in your bucket. To retrieve tags of any other version, use the versionId
+// query parameter. You also need permission for the s3:GetObjectVersionTagging
+// action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// The following actions are related to GetObjectTagging:
+//
+// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTorrent operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
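+//
+// A minimal usage sketch (editorial; the bucket name is a placeholder and
+// svc is an *s3.S3 client) that reads the bucket-level settings:
+//
+//    out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        cfg := out.PublicAccessBlockConfiguration
+//        fmt.Println(aws.BoolValue(cfg.BlockPublicAcls),
+//            aws.BoolValue(cfg.RestrictPublicBuckets))
+//    }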
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// To use this API against an access point, you must provide the alias of the +// access point in place of the bucket name or specify the access point ARN. +// When using the access point ARN, you must direct requests to the access point +// hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. +// When using the Amazon Web Services SDKs, you provide the ARN in place of +// the bucket name. For more information see, Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. 
+// req, resp := client.HeadObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+ op := &request.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ output = &HeadObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// HeadObject API operation for Amazon Simple Storage Service.
+//
+// The HEAD action retrieves metadata from an object without returning the object
+// itself. This action is useful if you're only interested in an object's metadata.
+// To use HEAD, you must have READ access to the object.
+//
+// A HEAD request has the same options as a GET action on an object. The response
+// is identical to the GET response except that there is no response body. Because
+// of this, if the HEAD request generates an error, it returns a generic 404
+// Not Found or 403 Forbidden code. It is not possible to retrieve the exact
+// exception beyond these error codes.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you retrieve the metadata from the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// * Encryption request headers, like x-amz-server-side-encryption, should
+// not be sent for GET requests if your object uses server-side encryption
+// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys,
+// you’ll get an HTTP 400 Bad Request error.
+//
+// * The last modified property in this case is the creation date of the
+// object.
+//
+// Request headers are limited to 8 KB in size. For more information, see Common
+// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+//
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both of the If-Match and If-Unmodified-Since
+// headers are present in the request as follows: If-Match condition evaluates
+// to true, and If-Unmodified-Since condition evaluates to false; then Amazon
+// S3 returns 200 OK and the data requested.
+//
+// * Consideration 2 – If both of the If-None-Match and If-Modified-Since
+// headers are present in the request as follows: If-None-Match condition
+// evaluates to false, and If-Modified-Since condition evaluates to true;
+// then Amazon S3 returns the 304 Not Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// Permissions
+//
+// You need the relevant read object (or version) permission for this operation.
+// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following actions are related to HeadObject: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketAnalyticsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ output = &ListBucketAnalyticsConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100 configurations
+// at a time. You should always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there will be a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// The following operations are related to ListBucketAnalyticsConfigurations:
+//
+// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+//
+// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+//
+// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketAnalyticsConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. +// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketInventoryConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ output = &ListBucketInventoryConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 inventory configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100 configurations
+// at a time. Always check the IsTruncated element in the response. If there
+// are no more configurations to list, IsTruncated is set to false. If there
+// are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken. You use the NextContinuationToken value
+// to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
+//
+// The following operations are related to ListBucketInventoryConfigurations:
+//
+// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. +// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. 
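+//
+// As a minimal sketch of listing every configuration with the IsTruncated and
+// NextContinuationToken fields described below (the bucket name and the client
+// variable are assumptions for illustration, not part of this API):
+//
+// input := &s3.ListBucketMetricsConfigurationsInput{Bucket: aws.String("my-bucket")}
+// for {
+// out, err := client.ListBucketMetricsConfigurations(input)
+// if err != nil {
+// break // inspect err as an awserr.Error in real code
+// }
+// for _, cfg := range out.MetricsConfigurationList {
+// fmt.Println(aws.StringValue(cfg.Id))
+// }
+// if !aws.BoolValue(out.IsTruncated) {
+// break
+// }
+// input.ContinuationToken = out.NextContinuationToken
+// }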
+// +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
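+//
+// For instance, a hypothetical pre-Send customization (the header name is
+// invented for illustration):
+//
+// req, resp := client.ListMultipartUploadsRequest(params)
+// req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+// if err := req.Send(); err == nil { // resp is now filled
+// fmt.Println(resp)
+// }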
+//
+//
+// // Example sending a request using the ListMultipartUploadsRequest method.
+// req, resp := client.ListMultipartUploadsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ output = &ListMultipartUploadsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This action lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This action returns at most 1,000 multipart uploads in the response; 1,000
+// is both the maximum and the default number of uploads a response can include.
+// You can further limit the number of uploads in a response by specifying the
+// max-uploads parameter in the request. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListMultipartUploads:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
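+//
+// As a sketch of the awserr inspection pattern referenced throughout this file
+// (bucket name assumed for illustration), a failed Send surfaces an awserr.Error:
+//
+// _, err := client.ListObjectVersions(&s3.ListObjectVersionsInput{Bucket: aws.String("no-such-bucket")})
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
+// fmt.Println(aerr.Code(), aerr.Message())
+// }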
+// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// +// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. 
+// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset +// of the objects in a bucket. 
A 200 OK response can contain valid or invalid +// XML. Make sure to design your application to parse the contents of the response +// and handle it appropriately. Objects are returned sorted in an ascending +// order of the respective key names in the list. For more information about +// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// +// To use this operation, you must have READ access to the bucket. +// +// To use this action in an Identity and Access Management (IAM) policy, you +// must have permissions to perform the s3:ListBucket action. The bucket owner +// has this permission by default and can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// The following operations are related to ListObjectsV2: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. 
+// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. +// +// If the upload was created using a checksum algorithm, you will need to have +// permission to the kms:Decrypt action for the request to succeed. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
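+//
+// Beyond the page-count example below, a hypothetical early stop once enough
+// parts have been seen (the threshold is invented for illustration):
+//
+// var seen int
+// err := client.ListPartsPages(params,
+// func(page *s3.ListPartsOutput, lastPage bool) bool {
+// seen += len(page.Parts)
+// return seen < 10000 // returning false ends the iteration early
+// })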
+// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. 
Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. 
If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+// A short sketch of this path follows the Grantee Values list below.
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (Amazon
+// Web Services accounts or Amazon S3 groups) who will receive the permission.
+// If you use these ACL-specific headers, you cannot use the x-amz-acl header
+// to set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an Amazon Web Services account uri – if you are granting permissions
+// to a predefined group emailAddress – if the value specified is the email
+// address of an Amazon Web Services account Using email addresses to specify
+// a grantee is only supported in the following Amazon Web Services Regions:
+// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo). For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference. For example, the following
+// x-amz-grant-write header grants create, overwrite, and delete objects
+// permission to the LogDelivery group predefined by Amazon S3 and two Amazon
+// Web Services accounts identified by their canonical user IDs. x-amz-grant-write:
+// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
+// id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following Amazon Web Services
+// Regions: US East (N. Virginia) US West (N. California) US West (Oregon)
+// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe
+// (Ireland) South America (São Paulo). For a list of all the Amazon S3 supported
+// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
+// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. 
+// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. 
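+//
+// As an illustrative sketch (the bucket name, analytics ID, and destination
+// bucket ARN below are placeholders, and svc is assumed to be an *s3.S3 client),
+// a CSV data-export configuration could be put like this:
+//
+// _, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     Id:     aws.String("report1"),
+//     AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//         Id: aws.String("report1"),
+//         StorageClassAnalysis: &s3.StorageClassAnalysis{
+//             DataExport: &s3.StorageClassAnalysisDataExport{
+//                 OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//                 Destination: &s3.AnalyticsExportDestination{
+//                     S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                         Bucket: aws.String("arn:aws:s3:::my-analytics-bucket"), // destination must be an ARN
+//                         Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//                     },
+//                 },
+//             },
+//         },
+//     },
+// })
+//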
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. 
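+//
+// For instance, a minimal sketch (svc is assumed to be an *s3.S3 client; the
+// origin and bucket names reuse the placeholders above) with a single rule
+// allowing GETs from that origin:
+//
+// _, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//     Bucket: aws.String("my.example.bucket.com"),
+//     CORSConfiguration: &s3.CORSConfiguration{
+//         CORSRules: []*s3.CORSRule{{
+//             AllowedOrigins: []*string{aws.String("http://www.example.com")},
+//             AllowedMethods: []*string{aws.String("GET")},
+//             AllowedHeaders: []*string{aws.String("*")},
+//         }},
+//     },
+// })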
+// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +// +// Related Resources +// +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// This action uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Key for an existing bucket. +// +// Default encryption for a bucket can use server-side encryption with Amazon +// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify +// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket +// Key. When the default encryption is SSE-KMS, if you upload an object to the +// bucket and do not specify the KMS key to use for encryption, Amazon S3 uses +// the default Amazon Web Services managed KMS key for your account. For information +// about default encryption, see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see +// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon S3 User Guide. +// +// This action requires Amazon Web Services Signature Version 4. For more information, +// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
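+//
+// A minimal sketch (placeholder bucket name; svc is assumed to be an *s3.S3
+// client) that sets SSE-S3 as the bucket's default encryption:
+//
+// _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//     Bucket: aws.String("my-bucket"),
+//     ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//         Rules: []*s3.ServerSideEncryptionRule{{
+//             ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                 SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256), // SSE-S3
+//             },
+//         }},
+//     },
+// })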
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketEncryption for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
+func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) {
+	req, out := c.PutBucketEncryptionRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) {
+	req, out := c.PutBucketEncryptionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration"
+
+// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method.
+// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration
+func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketIntelligentTieringConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?intelligent-tiering",
+	}
+
+	if input == nil {
+		input = &PutBucketIntelligentTieringConfigurationInput{}
+	}
+
+	output = &PutBucketIntelligentTieringConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
+//
+// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You
+// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
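+//
+// A minimal sketch (placeholder bucket name and configuration ID; svc is
+// assumed to be an *s3.S3 client) that moves objects to the Archive Access
+// tier after 90 days without access:
+//
+// _, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     Id:     aws.String("tiering1"),
+//     IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
+//         Id:     aws.String("tiering1"),
+//         Status: aws.String(s3.IntelligentTieringStatusEnabled),
+//         Tierings: []*s3.Tiering{{
+//             AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
+//             Days:       aws.Int64(90),
+//         }},
+//     },
+// })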
+// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration +// bucket permission to set the configuration on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. 
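+//
+// As an illustrative sketch (bucket names, the destination ARN, and the
+// inventory ID are placeholders; svc is assumed to be an *s3.S3 client), a
+// daily CSV inventory of current object versions might be configured as:
+//
+// _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//     Bucket: aws.String("my-source-bucket"),
+//     Id:     aws.String("inventory1"),
+//     InventoryConfiguration: &s3.InventoryConfiguration{
+//         Id:                     aws.String("inventory1"),
+//         IsEnabled:              aws.Bool(true),
+//         IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//         Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
+//         Destination: &s3.InventoryDestination{
+//             S3BucketDestination: &s3.InventoryS3BucketDestination{
+//                 Bucket: aws.String("arn:aws:s3:::my-destination-bucket"), // destination must be an ARN
+//                 Format: aws.String(s3.InventoryFormatCsv),
+//             },
+//         },
+//     },
+// })
+//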
+// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon S3 User Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
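+//
+// For example, a sketch (input is assumed to be a prepared
+// *PutBucketInventoryConfigurationInput) that bounds the call with a timeout:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// _, err := svc.PutBucketInventoryConfigurationWithContext(ctx, input)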
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the Amazon Web Services account that +// created the resource, can access it. The resource owner can optionally grant +// access permissions to others by writing an access policy. For this operation, +// users must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. 
If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). +// +// Related Resources +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// +// * By default, a resource owner—in this case, a bucket owner, which is +// the Amazon Web Services account that created the bucket—can perform +// any of the operations. A resource owner can also grant others permission +// to perform the operation. For more information, see the following topics +// in the Amazon S3 User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information +// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// +// Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. An Amazon +// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. 
If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the Amazon Web Services +// account that created it) can access the resource. The resource owner can +// optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
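+//
+// For example, an illustrative sketch (the bucket name, rule ID, and prefix
+// are placeholders; svc is assumed to be an *s3.S3 client) that puts a single
+// rule expiring objects under the "logs/" prefix after 365 days:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// _, err := svc.PutBucketLifecycleConfigurationWithContext(ctx, &s3.PutBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//         Rules: []*s3.LifecycleRule{{
+//             ID:         aws.String("expire-logs"),
+//             Status:     aws.String(s3.ExpirationStatusEnabled),
+//             Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//             Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+//         }},
+//     },
+// })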
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLoggingRequest method.
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLogging,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?logging",
+	}
+
+	if input == nil {
+		input = &PutBucketLoggingInput{}
+	}
+
+	output = &PutBucketLoggingOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "contentMd5Handler",
+		Fn:   checksum.AddBodyContentMD5Handler,
+	})
+	return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. All logs are saved to buckets
+// in the same Amazon Web Services Region as the source bucket. To set the logging
+// status of a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
+// the Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// If the target bucket for log delivery uses the bucket owner enforced setting
+// for S3 Object Ownership, you can't use the Grantee request element to grant
+// access to others. Permissions can only be granted using policies. For more
+// information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
+// in the Amazon S3 User Guide.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName>
+// </Grantee> DisplayName is optional and ignored in the request.
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element:
+//
+// <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in
+// the Amazon S3 User Guide.
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
+
+// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketMetricsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
+// req, resp := client.PutBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketMetricsConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?metrics",
+	}
+
+	if input == nil {
+		input = &PutBucketMetricsConfigurationInput{}
+	}
+
+	output = &PutBucketMetricsConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket. You can have up to 1,000 metrics configurations per bucket.
+// If you're updating an existing metrics configuration, note that this is a
+// full replacement of the existing metrics configuration. If you don't include
+// the elements you want to keep, they are erased.
+//
+// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+//
+// The following operations are related to PutBucketMetricsConfiguration:
+//
+// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// PutBucketMetricsConfiguration has the following special error:
+//
+// * Error code: TooManyConfigurations Description: You are attempting to
+// create a new configuration but have already reached the 1,000-configuration
+// limit. HTTP Status Code: HTTP 400 Bad Request
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
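+//
+// A minimal sketch (placeholder bucket name, metrics ID, and prefix; svc is
+// assumed to be an *s3.S3 client) that enables request metrics for objects
+// under the "photos/" prefix:
+//
+// _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     Id:     aws.String("metrics1"),
+//     MetricsConfiguration: &s3.MetricsConfiguration{
+//         Id:     aws.String("metrics1"),
+//         Filter: &s3.MetricsFilter{Prefix: aws.String("photos/")},
+//     },
+// })
+//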
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. 
For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+// <NotificationConfiguration>
+//
+// </NotificationConfiguration>
+//
+// This action replaces the existing notification configuration with the configuration
+// you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// For more information about the number of event notification configurations
+// that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3)
+// in Amazon Web Services General Reference.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with s3:PutBucketNotification permission.
+//
+// The PUT notification is an atomic operation. For example, suppose your notification
+// configuration includes SNS topic, SQS queue, and Lambda function configurations.
+// When you send a PUT request with this configuration, Amazon S3 sends test
+// messages to your SNS topic. If the message fails, the entire PUT action will
+// fail, and Amazon S3 will not add the configuration to your bucket.
+//
+// Responses
+//
+// If the configuration in the request body includes only one TopicConfiguration
+// specifying only the s3:ReducedRedundancyLostObject event type, the response
+// will also include the x-amz-sns-test-message-id header containing the message
+// ID of the test notification sent to the topic.
+//
+// The following action is related to PutBucketNotificationConfiguration:
+//
+//    * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotificationConfiguration for usage and error information.
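+//
+// A usage sketch (editor-added; the bucket name, queue ARN, and event list are
+// hypothetical): it assumes an existing *s3.S3 client named svc and configures
+// the bucket to send object-created events to an SQS queue. Passing an empty
+// &s3.NotificationConfiguration{} instead would disable notifications, per the
+// description above:
+//
+//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // hypothetical
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            QueueConfigurations: []*s3.QueueConfiguration{{
+//                QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"), // hypothetical
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//            }},
+//        },
+//    })
+//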
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" + +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketOwnershipControlsRequest method. +// req, resp := client.PutBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opPutBucketOwnershipControls, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &PutBucketOwnershipControlsInput{} + } + + output = &PutBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying permissions +// in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). 
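+//
+// A usage sketch (editor-added; the bucket name is hypothetical, and the
+// "BucketOwnerEnforced" value assumes this SDK snapshot already supports that
+// ownership setting): it assumes an existing *s3.S3 client named svc and
+// disables ACLs on the bucket by enforcing bucket-owner ownership:
+//
+//    _, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
+//        Bucket: aws.String("my-bucket"), // hypothetical
+//        OwnershipControls: &s3.OwnershipControls{
+//            Rules: []*s3.OwnershipControlsRule{{
+//                // Other documented values: "BucketOwnerPreferred", "ObjectWriter".
+//                ObjectOwnership: aws.String("BucketOwnerEnforced"),
+//            }},
+//        },
+//    })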
+// +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). +// +// The following operations are related to PutBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. 
+// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the Amazon Web Services account that +// owns the bucket, the calling identity must have the PutBucketPolicy permissions +// on the specified bucket and belong to the bucket owner's account in order +// to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the Amazon Web Services account +// that owns a bucket can always use this operation, even if the policy explicitly +// denies the root user the ability to perform this action. +// +// For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). 
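+//
+// A usage sketch (editor-added; every name and ARN is hypothetical): it assumes
+// an existing *s3.S3 client named svc and installs a single Filter-based rule.
+// Because the rule uses Filter, it also sets Status, Priority, and
+// DeleteMarkerReplication, as required by the paragraph above:
+//
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("source-bucket"), // hypothetical
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // hypothetical
+//            Rules: []*s3.ReplicationRule{{
+//                Status:                  aws.String("Enabled"),
+//                Priority:                aws.Int64(1),
+//                Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
+//                DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
+//                Destination:             &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//            }},
+//        },
+//    })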
+// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// Permissions +// +// To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration +// permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account +// that created the bucket, can perform this operation. The resource owner can +// also grant others permissions to perform the operation. For more information +// about permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. 
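+//
+// A usage sketch (editor-added; the bucket name is hypothetical, and the
+// snippet assumes the standard library "context" and "time" packages are
+// imported, which satisfy aws.Context): it bounds the call with a 10-second
+// timeout and makes requesters pay for downloads:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.PutBucketRequestPaymentWithContext(ctx, &s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("my-bucket"), // hypothetical
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String("Requester"), // or "BucketOwner" to restore the default
+//        },
+//    })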
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag +// key values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of +// that application across several services. For more information, see Cost +// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to +// an existing list of tags. +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. 
The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// action is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning configuration, +// you must include the x-amz-mfa request header and the Status and the MfaDelete +// request elements in a request to set the versioning state of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
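+//
+// A usage sketch (editor-added; the bucket name is hypothetical): it assumes
+// an existing *s3.S3 client named svc and enables versioning on the bucket;
+// sending "Suspended" instead would suspend it, as described above:
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"), // hypothetical
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String("Enabled"),
+//        },
+//    })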
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT action requires the S3:PutBucketWebsite permission. 
By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon S3 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the PutObjectRequest method.
+//    req, resp := client.PutObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+	op := &request.Operation{
+		Name:       opPutObject,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &PutObjectInput{}
+	}
+
+	output = &PutObjectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// PutObject API operation for Amazon Simple Storage Service.
+//
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to
+// add an object to it.
+//
+// Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket.
+//
+// Amazon S3 is a distributed system. If it receives multiple write requests
+// for the same object simultaneously, it overwrites all but the last object
+// written. Amazon S3 does not provide object locking; if you need this, make
+// sure to build it into your application layer or use versioning instead.
+//
+// To ensure that data is not corrupted traversing the network, use the Content-MD5
+// header. When you use this header, Amazon S3 checks the object against the
+// provided MD5 value and, if they do not match, returns an error. Additionally,
+// you can calculate the MD5 while putting an object to Amazon S3 and compare
+// the returned ETag to the calculated MD5 value.
+//
+//    * To successfully complete the PutObject request, you must have the s3:PutObject
+//    permission in your IAM permissions.
+//
+//    * To successfully change the object's ACL with your PutObject request,
+//    you must have the s3:PutObjectAcl permission in your IAM permissions.
+//
+//    * The Content-MD5 header is required for any request to upload an object
+//    with a retention period configured using Amazon S3 Object Lock. For more
+//    information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview
+//    (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+//    in the Amazon S3 User Guide.
+//
+// Server-side Encryption
+//
+// You can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts the data when you access it. You have the option to provide
+// your own encryption key or use Amazon Web Services managed encryption keys
+// (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+//
+// If you request server-side encryption using Amazon Web Services Key Management
+// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For
+// more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// in the Amazon S3 User Guide.
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// You can use headers to grant ACL-based permissions. By default, all objects
+// are private.
Only the owner has full access control. When adding a new object, +// you can grant permissions to individual Amazon Web Services accounts or to +// predefined groups defined by Amazon S3. These permissions are then added +// to the ACL on the object. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// If the bucket that you're uploading objects to uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify +// an ACL or PUT requests that specify bucket owner full control ACLs, such +// as the bucket-owner-full-control canned ACL or an equivalent form of this +// ACL expressed in the XML format. PUT requests that contain other ACLs (for +// example, custom grants to certain Amazon Web Services accounts) fail and +// return a 400 error with the error code AccessControlListNotSupported. +// +// For more information, see Controlling ownership of objects and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// If your bucket uses the bucket owner enforced setting for Object Ownership, +// all objects written to the bucket by any account will be owned by the bucket +// owner. +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD Storage Class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different Storage Class. +// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, +// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 User Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. 
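+//
+// A usage sketch (editor-added; bucket, key, and content are hypothetical, and
+// the snippet assumes the standard library "bytes" package is imported): Body
+// must be an io.ReadSeeker so the SDK can sign and, if necessary, retry the
+// upload. The optional fields shown request SSE-S3 encryption and the STANDARD
+// storage class discussed above:
+//
+//    _, err := svc.PutObject(&s3.PutObjectInput{
+//        Bucket:               aws.String("my-bucket"),      // hypothetical
+//        Key:                  aws.String("docs/hello.txt"), // hypothetical
+//        Body:                 bytes.NewReader([]byte("hello world")),
+//        ServerSideEncryption: aws.String("AES256"),
+//        StorageClass:         aws.String("STANDARD"),
+//    })
+//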
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// Uses the acl subresource to set the access control list (ACL) permissions +// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission +// to set the ACL of an object. For more information, see What permissions can +// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 User Guide. 
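+//
+// A usage sketch (editor-added; bucket and key are hypothetical): it assumes
+// an existing *s3.S3 client named svc and applies the public-read canned ACL
+// via the x-amz-acl header, one of the two approaches described below:
+//
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("my-bucket"),      // hypothetical
+//        Key:    aws.String("docs/hello.txt"), // hypothetical
+//        ACL:    aws.String("public-read"),    // canned ACL; cannot be combined with x-amz-grant-* headers
+//    })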
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies
+// to grant access to your bucket and the objects in it. Requests to set ACLs
+// or update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see Controlling
+// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+//    * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+//    a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+//    predefined set of grantees and permissions. Specify the canned ACL name
+//    as the value of x-amz-acl. If you use this header, you cannot use other
+//    access control-specific headers in your request. For more information,
+//    see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+//    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+//    x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+//    these headers, you specify explicit access permissions and grantees (Amazon
+//    Web Services accounts or Amazon S3 groups) who will receive the permission.
+//    If you use these ACL-specific headers, you cannot use x-amz-acl header
+//    to set a canned ACL. These parameters map to the set of permissions that
+//    Amazon S3 supports in an ACL. For more information, see Access Control
+//    List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//    You specify each grantee as a type=value pair, where the type is one of
+//    the following: id – if the value specified is the canonical user ID
+//    of an Amazon Web Services account uri – if you are granting permissions
+//    to a predefined group emailAddress – if the value specified is the email
+//    address of an Amazon Web Services account Using email addresses to specify
+//    a grantee is only supported in the following Amazon Web Services Regions:
+//    US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+//    (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+//    South America (São Paulo) For a list of all the Amazon S3 supported Regions
+//    and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+//    in the Amazon Web Services General Reference. For example, the following
+//    x-amz-grant-read header grants list objects permission to the two Amazon
+//    Web Services accounts identified by their email addresses. x-amz-grant-read:
+//    emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+//    * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//    xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+//    DisplayName is optional and ignored in the request.
+//
+//    * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//    xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+//    * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//    xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+//    The grantee is resolved to the CanonicalUser and, in a response to a GET
+//    Object acl request, appears as the CanonicalUser.
Using email addresses +// to specify a grantee is only supported in the following Amazon Web Services +// Regions: US East (N. Virginia) US West (N. California) US West (Oregon) +// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe +// (Ireland) South America (São Paulo) For a list of all the Amazon S3 supported +// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLegalHoldRequest method. 
+// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Applies a legal hold configuration to the specified object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. 
+// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// * The DefaultRetention settings require both a mode and a period. +// +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +// +// * You can only enable Object Lock for new buckets. If you want to turn +// on Object Lock for an existing bucket, contact Amazon Web Services Support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// Places an Object Retention configuration on an object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// Users or accounts require the s3:PutObjectRetention permission in order to +// place an Object Retention configuration on objects. Bypassing a Governance +// Retention configuration requires the s3:BypassGovernanceRetention permission. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional action +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
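+//
+// A minimal sketch of enabling all four public-access guards on a bucket;
+// the svc client value and the bucket name are illustrative assumptions:
+//
+// _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//     Bucket: aws.String("example-bucket"), // hypothetical bucket
+//     PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//         BlockPublicAcls:       aws.Bool(true),
+//         BlockPublicPolicy:     aws.Bool(true),
+//         IgnorePublicAcls:      aws.Bool(true),
+//         RestrictPublicBuckets: aws.Bool(true),
+//     },
+// })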
+// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. 
+// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// This action is not supported by Amazon S3 on Outposts. +// +// This action performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same Amazon Web Services Region as the bucket +// that contains the archive object that is being queried. The Amazon Web +// Services account that initiates the job must have permissions to write +// to the S3 bucket. You can specify the storage class and encryption for +// the output objects stored in the bucket. For more information about output, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. For more information about the S3 structure +// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. 
The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring objects +// +// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage +// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers are not accessible in real time. For objects in Archive Access +// or Deep Archive Access tiers you must first initiate a restore request, and +// then wait until the object is moved into the Frequent Access tier. For objects +// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate +// a restore request, and then wait until a temporary copy of the object is +// available. To access an archived object, you must restore the object for +// the duration (number of days) that you specify. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive +// tier when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. 
Expedited retrievals and provisioned capacity
+// are not available for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// * Standard - Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for retrieval
+// requests that do not specify the retrieval option. Standard retrievals
+// typically finish within 3–5 hours for objects stored in the S3 Glacier
+// storage class or S3 Intelligent-Tiering Archive tier. They typically finish
+// within 12 hours for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals
+// are free for objects stored in S3 Intelligent-Tiering.
+//
+// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier,
+// enabling you to retrieve large amounts, even petabytes, of data inexpensively.
+// Bulk retrievals typically finish within 5–12 hours for objects stored
+// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier.
+// They typically finish within 48 hours for objects stored in the S3 Glacier
+// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change to a faster restore
+// speed while a restore is in progress. For more information, see Upgrading
+// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
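+//
+// A minimal sketch of initiating a ten-day, Standard-tier restore of an
+// archived object; the svc client value and the bucket and key names are
+// illustrative assumptions:
+//
+// _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//     Bucket: aws.String("example-bucket"), // hypothetical bucket
+//     Key:    aws.String("archived-key"),   // hypothetical object key
+//     RestoreRequest: &s3.RestoreRequest{
+//         Days: aws.Int64(10),
+//         GlacierJobParameters: &s3.GlacierJobParameters{
+//             Tier: aws.String(s3.TierStandard),
+//         },
+//     },
+// })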
+// +// Responses +// +// A successful action returns either the 200 OK or 202 Accepted status code. +// +// * If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. +// +// * If the object is previously restored, Amazon S3 returns 200 OK in the +// response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This action is not allowed against this storage tier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + + es := NewSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// This action is not supported by Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. 
+// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon +// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), +// server-side encryption is handled transparently, so you don't need to +// specify anything. For more information about server-side encryption, including +// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. +// +// Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). +// +// GetObject Support +// +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// * Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an +// object to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot +// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. +// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// in the Amazon S3 User Guide. +// +// Special Errors +// +// For a list of special errors for this operation, see List of SELECT Object +// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) +// +// Related Resources +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. 
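+//
+// A minimal sketch of running a query and draining the resulting event
+// stream; the svc client value and the bucket, key, and expression below are
+// illustrative assumptions:
+//
+// out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//     Bucket:              aws.String("example-bucket"), // hypothetical bucket
+//     Key:                 aws.String("data.csv"),       // hypothetical CSV object
+//     Expression:          aws.String("SELECT * FROM S3Object s"),
+//     ExpressionType:      aws.String(s3.ExpressionTypeSql),
+//     InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+//     OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+// })
+// if err != nil {
+//     return err // handle the awserr.Error as described above
+// }
+// defer out.EventStream.Close()
+// for ev := range out.EventStream.Events() {
+//     if records, ok := ev.(*s3.RecordsEvent); ok {
+//         fmt.Print(string(records.Payload)) // query results arrive here
+//     }
+// }
+//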
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +var _ awserr.Error + +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSelectObjectContentEventStream constructor function. Using the functional options +// to pass in nested mock behavior. +type SelectObjectContentEventStream struct { + + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. +// This function should only be used for testing and mocking the SelectObjectContentEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. http.Response.Body), that will be closed when the stream Close method +// is called. 
+// +// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream){ +// es.Reader = myMockStreamReader +// es.StreamCloser = myMockStreamCloser +// }) +func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { + es := &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. 
+func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon +// S3 returns an upload ID, a unique identifier, that you must include in your +// upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. +// +// For information about maximum and minimum part sizes and other multipart +// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// +// If the upload request is signed with Signature Version 4, then Amazon Web +// Services S3 uses the x-amz-content-sha256 header as a checksum instead of +// Content-MD5. 
For more information see Authenticating Requests: Using the +// Authorization Header (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon S3 User Guide . +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the Amazon Web Services managed encryption keys. If you +// choose to provide your own encryption key, the request headers you provide +// in the request must match the headers you used in the request to initiate +// the upload by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. 
+//    HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client
+//
+// Related Resources
+//
+//    * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+//    * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+//    * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+//    * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+//    * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+	req, out := c.UploadPartRequest(input)
+	return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+	req, out := c.UploadPartRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the UploadPartCopyRequest method.
+//    req, resp := client.UploadPartCopyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+	op := &request.Operation{
+		Name:       opUploadPartCopy,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartCopyInput{}
+	}
+
+	output = &UploadPartCopyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as data source.
+// You specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// For information about maximum and minimum part sizes and other multipart
+// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+//    * For conceptual information about multipart uploads, see Uploading Objects
+//    Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+//    in the Amazon S3 User Guide.
+//
+//    * For information about permissions required to use the multipart upload
+//    API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+//    in the Amazon S3 User Guide.
+//
+//    * For information about copying objects using a single atomic action vs.
+//    a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+//    in the Amazon S3 User Guide.
+//
+//    * For information about using server-side encryption with customer-provided
+//    encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//    and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
+// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+//    * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+//    headers are present in the request as follows: the x-amz-copy-source-if-match
+//    condition evaluates to true, and the x-amz-copy-source-if-unmodified-since
+//    condition evaluates to false, Amazon S3 returns 200 OK and copies the
+//    data.
+//
+//    * Consideration 2 - If both of the x-amz-copy-source-if-none-match and
+//    x-amz-copy-source-if-modified-since headers are present in the request
+//    as follows: the x-amz-copy-source-if-none-match condition evaluates to false,
+//    and the x-amz-copy-source-if-modified-since condition evaluates to true,
+//    Amazon S3 returns a 412 Precondition Failed response code.
+//
+// Versioning
+//
+// If your bucket has versioning enabled, you could have multiple versions of
+// the same object. By default, x-amz-copy-source identifies the current version
+// of the object to copy. If the current version is a delete marker and you
+// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404
+// error, because the object does not exist. If you specify versionId in the
+// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+// an HTTP 400 error, because you are not allowed to specify a delete marker
+// as a version for the x-amz-copy-source.
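+//
+// As a minimal illustrative sketch (the bucket, key, and upload ID values below
+// are hypothetical placeholders), a 5 MiB part can be copied from an existing
+// object with a byte range roughly as follows:
+//
+//    out, err := client.UploadPartCopy(&UploadPartCopyInput{
+//        Bucket:          aws.String("destination-bucket"),
+//        Key:             aws.String("destination-key"),
+//        UploadId:        aws.String("example-upload-id"),
+//        PartNumber:      aws.Int64(1),
+//        CopySource:      aws.String("/source-bucket/source-key"),
+//        CopySourceRange: aws.String("bytes=0-5242879"),
+//    })
+//    if err != nil {
+//        // handle the error, for example by aborting the multipart upload
+//    }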
+//
+// You can optionally specify a specific version of the source object to copy
+// by adding the versionId subresource as shown in the following example:
+//
+// x-amz-copy-source: /bucket/object?versionId=version id
+//
+// Special Errors
+//
+//    * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+//    The upload ID might be invalid, or the multipart upload might have been
+//    aborted or completed. HTTP Status Code: 404 Not Found
+//
+//    * Code: InvalidRequest Cause: The specified copy source is not supported
+//    as a byte-range copy source. HTTP Status Code: 400 Bad Request
+//
+// Related Resources
+//
+//    * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+//    * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+//    * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+//    * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+//    * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+//    * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+	req, out := c.UploadPartCopyRequest(input)
+	return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+	req, out := c.UploadPartCopyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opWriteGetObjectResponse = "WriteGetObjectResponse"
+
+// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the
+// client's request for the WriteGetObjectResponse operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the WriteGetObjectResponseRequest method.
+//    req, resp := client.WriteGetObjectResponseRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) {
+	op := &request.Operation{
+		Name:       opWriteGetObjectResponse,
+		HTTPMethod: "POST",
+		HTTPPath:   "/WriteGetObjectResponse",
+	}
+
+	if input == nil {
+		input = &WriteGetObjectResponseInput{}
+	}
+
+	output = &WriteGetObjectResponseOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Sign.Remove(v4.SignRequestHandler)
+	handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload)
+	req.Handlers.Sign.PushFrontNamed(handler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels))
+	req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
+	return
+}
+
+// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see Transforming
+// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html),
+// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when
+// it internally invokes GetObject. When WriteGetObjectResponse is called by
+// a customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// You can include any number of metadata headers. When including a metadata
+// header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
+// MyCustomValue. The primary use case for this is to forward GetObject metadata.
+//
+// Amazon Web Services provides some prebuilt Lambda functions that you can
+// use with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in
+// the Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend,
+// a natural language processing (NLP) service that uses machine learning to find
+// insights and relationships in text. It automatically detects personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service that uses machine learning to find
+// insights and relationships in text.
+// It automatically redacts personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression
+// is equipped to decompress objects stored in S3 in one of six compressed file
+// formats: bzip2, gzip, snappy, zlib, zstandard, and ZIP.
+//
+// For information on how to view and use these functions, see Using Amazon
+// Web Services built Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
+// in the Amazon S3 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation WriteGetObjectResponse for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	return out, req.Send()
+}
+
+// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of
+// the ability to pass a context and additional request options.
+//
+// See WriteGetObjectResponse for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon S3 User Guide.
+type AbortIncompleteMultipartUpload struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+	// upload.
+	DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) GoString() string {
+	return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
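+//
+// A minimal illustrative sketch of wiring this type into a bucket lifecycle
+// rule via this setter (the rule ID and the seven-day window are hypothetical
+// choices):
+//
+//    rule := &LifecycleRule{
+//        ID:     aws.String("abort-incomplete-mpu"),
+//        Status: aws.String("Enabled"),
+//        AbortIncompleteMultipartUpload: (&AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7),
+//    }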
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. 
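+//
+// A minimal illustrative sketch of overriding replica ownership by chaining
+// the generated setter ("Destination" is the OwnerOverride enum value):
+//
+//    t := (&AccessControlTranslation{}).SetOwner("Destination")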
+type AccessControlTranslation struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the replica ownership. For default and valid values, see PUT bucket
+	// replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+	// in the Amazon S3 API Reference.
+	//
+	// Owner is a required field
+	Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlTranslation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlTranslation) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlTranslation) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"}
+	if s.Owner == nil {
+		invalidParams.Add(request.NewErrParamRequired("Owner"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation {
+	s.Owner = &v
+	return s
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates in any combination,
+// and an object must match all of the predicates for the filter to apply.
+type AnalyticsAndOperator struct {
+	_ struct{} `type:"structure"`
+
+	// The prefix to use when evaluating an AND predicate: The prefix that an object
+	// must have to be included in the analytics results.
+	Prefix *string `type:"string"`
+
+	// The list of tags to use when evaluating an AND predicate.
+	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AnalyticsAndOperator) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AnalyticsAndOperator) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsAndOperator) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPrefix sets the Prefix field's value.
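+//
+// Since an AND operator needs at least two predicates, a minimal sketch
+// combines a prefix with one tag (all values below are hypothetical
+// placeholders):
+//
+//    and := (&AnalyticsAndOperator{}).
+//        SetPrefix("logs/").
+//        SetTags([]*Tag{(&Tag{}).SetKey("team").SetValue("storage")})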
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. 
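+//
+// A filter carries exactly one of Prefix, Tag, or And; a minimal sketch using
+// this setter (the prefix value is a hypothetical placeholder):
+//
+//    filter := (&AnalyticsFilter{}).SetPrefix("documents/")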
+func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all Amazon Web Services +// accounts. +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. This date can change when making changes to + // your bucket, such as editing its bucket policy. 
+ CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. 
In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetID sets the ID field's value. +func (s *CORSRule) SetID(v string) *CORSRule { + s.ID = &v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. 
+	// Default value is FALSE. Setting this value
+	// to TRUE may lower performance.
+	AllowQuotedRecordDelimiter *bool `type:"boolean"`
+
+	// A single character used to indicate that a row should be ignored when the
+	// character is present at the start of that row. You can specify any character
+	// to indicate a comment line.
+	Comments *string `type:"string"`
+
+	// A single character used to separate individual fields in a record. You can
+	// specify an arbitrary delimiter.
+	FieldDelimiter *string `type:"string"`
+
+	// Describes the first line of input. Valid values are:
+	//
+	//    * NONE: First line is not a header.
+	//
+	//    * IGNORE: First line is a header, but you can't use the header values
+	//    to indicate the column in an expression. You can use column position (such
+	//    as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
+	//
+	//    * USE: First line is a header, and you can use the header value to identify
+	//    a column in an expression (SELECT "name" FROM OBJECT).
+	FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
+
+	// A single character used for escaping when the field delimiter is part of
+	// the value. For example, if the value is a, b, Amazon S3 wraps this field
+	// value in quotation marks, as follows: " a , b ".
+	//
+	// Type: String
+	//
+	// Default: "
+	//
+	// Ancestors: CSV
+	QuoteCharacter *string `type:"string"`
+
+	// A single character used for escaping the quotation mark character inside
+	// an already escaped value. For example, the value """ a , b """ is parsed
+	// as " a , b ".
+	QuoteEscapeCharacter *string `type:"string"`
+
+	// A single character used to separate individual records in the input. Instead
+	// of the default value, you can specify an arbitrary delimiter.
+	RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CSVInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CSVInput) GoString() string {
+	return s.String()
+}
+
+// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
+func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
+	s.AllowQuotedRecordDelimiter = &v
+	return s
+}
+
+// SetComments sets the Comments field's value.
+func (s *CSVInput) SetComments(v string) *CSVInput {
+	s.Comments = &v
+	return s
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
+	s.FieldDelimiter = &v
+	return s
+}
+
+// SetFileHeaderInfo sets the FileHeaderInfo field's value.
+func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
+	s.FileHeaderInfo = &v
+	return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
+	s.QuoteCharacter = &v
+	return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
+	s.QuoteEscapeCharacter = &v
+	return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
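+//
+// The generated setters return the receiver, so a CSVInput can be configured
+// by chaining; a minimal sketch (the header and delimiter choices here are
+// illustrative only):
+//
+//    csv := (&CSVInput{}).
+//        SetFileHeaderInfo("USE").
+//        SetFieldDelimiter(",").
+//        SetRecordDelimiter("\n")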
+func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Checksum) SetChecksumCRC32(v string) *Checksum { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Checksum) SetChecksumCRC32C(v string) *Checksum { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Checksum) SetChecksumSHA1(v string) *Checksum { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Checksum) SetChecksumSHA256(v string) *Checksum { + s.ChecksumSHA256 = &v + return s +} + +// Container for specifying the Lambda notification configuration. +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // Bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. 
+ Id *string `type:"string"` + + // The role supporting the invocation of the Lambda function + InvocationRole *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. 
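+ //
+ // Illustrative sketch only (not generated SDK code; the names are
+ // hypothetical, and the ETags would come from earlier UploadPart responses):
+ // completing an upload with this input type looks like:
+ //
+ //	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+ //		Bucket:   aws.String("example-bucket"),
+ //		Key:      aws.String("large-object"),
+ //		UploadId: createOut.UploadId, // from CreateMultipartUpload
+ //		MultipartUpload: &s3.CompletedMultipartUpload{
+ //			Parts: []*s3.CompletedPart{
+ //				{ETag: part1.ETag, PartNumber: aws.Int64(1)},
+ //				{ETag: part2.ETag, PartNumber: aws.Int64(2)},
+ //			},
+ //		},
+ //	})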
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
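+ //
+ // The SDK invokes Validate automatically before sending the request; it can
+ // also be called directly on a constructed input (illustrative only, in is
+ // hypothetical):
+ //
+ //	if err := in.Validate(); err != nil {
+ //		return err // reports a missing Bucket, Key, or UploadId, etc.
+ //	}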
+func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32C(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA1(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA256(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA256 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CompleteMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKey(v string) *CompleteMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CompleteMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CompleteMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + Bucket *string `type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. For more information about how + // the entity tag is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The object key of the newly created object. + Key *string `min:"1" type:"string"` + + // The URI that identifies the newly created object. + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. 
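+ //
+ // Illustrative only (hypothetical variable names): once the call returns,
+ // these response fields can be read through the aws helpers, for example:
+ //
+ //	if out.ServerSideEncryption != nil {
+ //		log.Printf("stored %s with SSE %s, ETag %s",
+ //			aws.StringValue(out.Key),
+ //			aws.StringValue(out.ServerSideEncryption),
+ //			aws.StringValue(out.ETag))
+ //	}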
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32C(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA1(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA256(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back + // an HTTP 400 response. + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompletedPart) SetChecksumCRC32(v string) *CompletedPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompletedPart) SetChecksumCRC32C(v string) *CompletedPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompletedPart) SetChecksumSHA1(v string) *CompletedPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompletedPart) SetChecksumSHA256(v string) *CompletedPart { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +// A container for describing a condition that must be met for the specified +// redirect to apply. For example: 1. If the request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If the request results in an HTTP 4xx error, +// redirect the request to another host where you might process the error. +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect requests for all pages with the prefix docs/, the key prefix will + // be docs/, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied.
+ // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) GoString() string { + return s.String() +} + +// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. +func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { + s.HttpErrorCodeReturnedEquals = &v + return s +} + +// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. +func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +type ContinuationEvent struct { + _ struct{} `locationName:"ContinuationEvent" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuationEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuationEvent) GoString() string { + return s.String() +} + +// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ContinuationEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +// MarshalEvent marshals the type into a stream event value. This method +// should only be used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +type CopyObjectInput struct { + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` + + // The canned ACL to apply to the object. + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the destination bucket. + // + // When using this action with an access point, you must direct requests to + // the access point hostname.
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and the key of the source object, separated by a slash + // (/). For example, to copy the object reports/january.pdf from the bucket + // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value + // must be URL-encoded. 
+ // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. + // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL-encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same Amazon Web Services Region. Alternatively, for objects accessed through + // Amazon S3 on Outposts, specify the ARN of the object as accessed in the + // format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL-encoded. + // + // To copy a specific version of an object, append ?versionId=<version-id> to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error.
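+ //
+ // For illustration (a sketch under the assumption of a raw 32-byte AES-256
+ // key; the SDK's SSE-C customization can also derive this header for you):
+ // the value is the base64-encoded MD5 of the raw key bytes, using
+ // crypto/md5 and encoding/base64:
+ //
+ //	sum := md5.Sum(rawKey) // rawKey []byte, 32 random bytes (hypothetical)
+ //	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])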
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account ID of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request fails with the HTTP status + // code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account ID of the expected source bucket owner. If the source bucket + // is owned by a different account, the request fails with the HTTP status code + // 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // The key of the destination object. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Specifies whether you want to apply a legal hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the Amazon Web Services KMS key ID to use for object encryption. + // All GET and PUT requests for an object protected by Amazon Web Services KMS + // will fail if not made via SSL or using SigV4. For information about configuring + // any of the officially supported Amazon Web Services SDKs and the Amazon + // Web Services CLI, see Specifying the Signature Version in Request Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, + // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide.
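+ //
+ // Illustrative only (hypothetical bucket and key): a copy that places the
+ // destination object in STANDARD_IA via the generated enum constant:
+ //
+ //	in := &s3.CopyObjectInput{
+ //		Bucket:       aws.String("destination-bucket"),
+ //		Key:          aws.String("report.pdf"),
+ //		CopySource:   aws.String("source-bucket/report.pdf"),
+ //		StorageClass: aws.String(s3.StorageClassStandardIa),
+ //	}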
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the destination object; this value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL + // Query parameters. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set provided in the request. + TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CopyObjectInput) SetChecksumAlgorithm(v string) *CopyObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `type:"structure"` + + // Version of the copied object in the destination bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. 
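+ //
+ // An editorial illustration of the masking contract described above (the key
+ // ARN is hypothetical):
+ //
+ //	out := &CopyObjectOutput{SSEKMSKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/example")}
+ //	fmt.Println(out) // SSEKMSKeyId prints as "sensitive", not the key ID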
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Container for all response elements. +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
+ // For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `type:"string"`
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only
+ // be present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `type:"string"`
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `type:"string"`
+
+ // Returns the ETag of the new object. The ETag reflects only changes to the
+ // contents of an object, not its metadata.
+ ETag *string `type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectResult) GoString() string {
+ return s.String()
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *CopyObjectResult) SetChecksumCRC32(v string) *CopyObjectResult {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *CopyObjectResult) SetChecksumCRC32C(v string) *CopyObjectResult {
+ s.ChecksumCRC32C = &v
+ return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *CopyObjectResult) SetChecksumSHA1(v string) *CopyObjectResult {
+ s.ChecksumSHA1 = &v
+ return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *CopyObjectResult) SetChecksumSHA256(v string) *CopyObjectResult {
+ s.ChecksumSHA256 = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
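+//
+// On a successful copy, ETag and LastModified are read back from the response
+// body rather than set by callers. An editorial sketch (out is a hypothetical
+// *CopyObjectOutput):
+//
+//	if r := out.CopyObjectResult; r != nil {
+//		fmt.Println(aws.StringValue(r.ETag), aws.TimeValue(r.LastModified))
+//	}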
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Container for all response elements. +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyPartResult) SetChecksumCRC32(v string) *CopyPartResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. 
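+//
+// Checksum values such as this one are base64-encoded. An editorial sketch of
+// recovering the raw 32-bit value, assuming the usual big-endian byte order
+// (result is a hypothetical *CopyPartResult; uses encoding/base64 and
+// encoding/binary):
+//
+//	raw, err := base64.StdEncoding.DecodeString(aws.StringValue(result.ChecksumCRC32C))
+//	if err == nil && len(raw) == 4 {
+//		sum := binary.BigEndian.Uint32(raw) // compare against a locally computed CRC32C
+//		_ = sum
+//	}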
+func (s *CopyPartResult) SetChecksumCRC32C(v string) *CopyPartResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyPartResult) SetChecksumSHA1(v string) *CopyPartResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyPartResult) SetChecksumSHA256(v string) *CopyPartResult { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket to create. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
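+ //
+ // Grant headers like this one take grantee strings of the form id="...",
+ // uri="...", or emailAddress="...". An editorial sketch with a hypothetical
+ // canonical user ID:
+ //
+ //	input.SetGrantWriteACP(`id="79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be"`)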
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control + // canned ACL or an equivalent form of this ACL expressed in the XML format. + ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
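+//
+// An editorial sketch of assembling a CreateBucketInput with these setters (the
+// bucket name and Region are hypothetical):
+//
+//	input := (&CreateBucketInput{}).
+//		SetBucket("example-bucket").
+//		SetCreateBucketConfiguration(
+//			(&CreateBucketConfiguration{}).SetLocationConstraint("eu-west-1"))
+//	if err := input.Validate(); err != nil {
+//		// Bucket is the only required field.
+//	}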
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value.
+func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput {
+ s.ObjectLockEnabledForBucket = &v
+ return s
+}
+
+// SetObjectOwnership sets the ObjectOwnership field's value.
+func (s *CreateBucketInput) SetObjectOwnership(v string) *CreateBucketInput {
+ s.ObjectOwnership = &v
+ return s
+}
+
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A forward slash followed by the name of the bucket.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+type CreateMultipartUploadInput struct {
+ _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The name of the bucket in which to initiate the upload.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ // you provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with an object action doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the multipart upload is to be initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
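+ //
+ // Per the x-amz-meta- location in the struct tag below, each entry is sent as
+ // a user-metadata header. An editorial sketch with hypothetical values:
+ //
+ //	input.SetMetadata(map[string]*string{
+ //		"origin": aws.String("editor-example"),
+ //	})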
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether you want to apply a legal hold to the uploaded object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // Specifies the date and time when you want the Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the Amazon Web Services KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ //
+ // SSEKMSEncryptionContext is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
+ // String and GoString methods.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the ID of the symmetric customer managed key to use for object
+ // encryption. All GET and PUT requests for an object protected by Amazon Web
+ // Services KMS will fail if not made via SSL or using SigV4.
+ // For information about configuring this using any of the officially supported
+ // Amazon Web Services SDKs and the Amazon Web Services CLI, see Specifying the
+ // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 User Guide.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 User Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
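+//
+// An editorial sketch of preparing a multipart upload with these setters (the
+// names are hypothetical; the UploadId returned in the output is then threaded
+// through the subsequent part uploads):
+//
+//	input := (&CreateMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("large-object.bin").
+//		SetChecksumAlgorithm("SHA256")
+//	if err := input.Validate(); err != nil {
+//		// Bucket and Key are required fields.
+//	}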
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetChecksumAlgorithm(v string) *CreateMultipartUploadInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. 
+func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
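+//
+// An editorial sketch of the effect (the ARN and resource name are hypothetical):
+//
+//	// s.Bucket initially holds "arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"
+//	modified, _ := s.updateArnableField("example-ap")
+//	// modified.(*CreateMultipartUploadInput).Bucket now holds "example-ap",
+//	// while the original input's Bucket still holds the ARN.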
+func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + Bucket *string `locationName:"Bucket" type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetChecksumAlgorithm(v string) *CreateMultipartUploadOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +// +// * The DefaultRetention settings require both a mode and a period. +// +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + // Must be used with Mode. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. Must be used with either Days or Years. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + // Must be used with Mode. + Years *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
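+//
+// An editorial sketch of a DefaultRetention that satisfies the rules above
+// (values hypothetical): a Mode plus exactly one of Days or Years:
+//
+//	retention := (&DefaultRetention{}).SetMode("GOVERNANCE").SetDays(30)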
+func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The objects to delete. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
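+//
+// Illustrative sketch, not emitted by the SDK code generator: the Delete
+// container defined earlier pairs a list of ObjectIdentifier values (a type
+// defined elsewhere in this file, assumed here to expose a SetKey setter)
+// with an optional quiet flag, and its Validate method surfaces problems
+// before any request is built:
+//
+//	del := (&Delete{}).
+//		SetObjects([]*ObjectIdentifier{(&ObjectIdentifier{}).SetKey("photos/2022/cat.jpg")}).
+//		SetQuiet(true)
+//	if err := del.Validate(); err != nil {
+//		// a nil Objects list or an invalid ObjectIdentifier is reported here
+//	}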
+func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
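+//
+// Illustrative sketch, not emitted by the SDK code generator: every *Input
+// type in this file that carries a Bucket member repeats the same unexported
+// trio. When a caller supplies an access point ARN instead of a bucket name,
+// hasEndpointARN detects it, getEndpointARN parses it, and updateArnableField
+// backfills the resource name into a copy of the input:
+//
+//	in := (&DeleteBucketCorsInput{}).
+//		SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint")
+//	if in.hasEndpointARN() {
+//		copied, _ := in.updateArnableField("myendpoint")
+//		_ = copied // a copy with Bucket backfilled; the original input is unchanged
+//	}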
+func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *DeleteBucketInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *DeleteBucketInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeleteBucketIntelligentTieringConfigurationInput struct {
+	_ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"`
+
+	// The name of the Amazon S3 bucket whose S3 Intelligent-Tiering configuration
+	// you want to delete.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The ID used to identify the S3 Intelligent-Tiering configuration.
+	//
+	// Id is a required field
+	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketIntelligentTieringConfigurationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetId sets the Id field's value.
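+//
+// Illustrative sketch, not emitted by the SDK code generator: unlike most
+// inputs in this file, the Intelligent-Tiering variants carry no
+// ExpectedBucketOwner member; only the bucket name and the configuration ID
+// are required (the ID below is a placeholder):
+//
+//	in := (&DeleteBucketIntelligentTieringConfigurationInput{}).
+//		SetBucket("my-bucket").
+//		SetId("ExampleConfig")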
+func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
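+//
+// Illustrative sketch, not emitted by the SDK code generator: String and
+// GoString on these types delegate to awsutil.Prettify, so printing an input
+// yields a readable field dump, with any member the API marks as sensitive
+// redacted:
+//
+//	in := (&DeleteBucketLifecycleInput{}).SetBucket("my-bucket")
+//	fmt.Println(in) // prints a brace-delimited dump such as { Bucket: "my-bucket" }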
+func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketOwnershipControlsInput struct { + _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The bucket that has the tag set to be removed. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
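+//
+// Illustrative sketch, not emitted by the SDK code generator: from a caller's
+// perspective the input/output pairs above feed the matching client
+// operations, and the empty *Output structs carry no data, so only the error
+// matters (assumes the aws/session package and the s3 client import path):
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.DeleteBucketTagging((&s3.DeleteBucketTaggingInput{}).SetBucket("my-bucket"))
+//	// err carries any API failure; the returned output has no fields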
+func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeleteBucketWebsiteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of an object.
+	IsLatest *bool `type:"boolean"`
+
+	// The object key.
+	Key *string `min:"1" type:"string"`
+
+	// Date and time the object was last modified.
+	LastModified *time.Time `type:"timestamp"`
+
+	// The account that created the delete marker.
+	Owner *Owner `type:"structure"`
+
+	// Version ID of an object.
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) GoString() string {
+	return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+	s.IsLatest = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+	s.Key = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+	s.LastModified = &v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+	s.Owner = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to replicate delete markers.
+	Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+	s.Status = &v
+	return s
+}
+
+type DeleteObjectInput struct {
+	_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+	// The name of the bucket containing the object.
+	//
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name. For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates whether S3 Object Lock should bypass Governance-mode restrictions
+	// to process this operation. To use this header, you must have the s3:BypassGovernanceRetention
+	// permission.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. 
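+//
+// A brief illustrative call (the serial number and token code are hypothetical);
+// per the field documentation, the value is the device serial, a space, then the
+// current token code:
+//
+//	input.SetMFA("arn:aws:iam::123456789012:mfa/alice 123456")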
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // The bucket name containing the objects from which to remove the tags. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key that identifies the object in the bucket from which to remove all + // tags. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
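+//
+// Setters return the receiver, so they can be chained while building the input;
+// a sketch with assumed bucket, key, and account values:
+//
+//	input := (&s3.DeleteObjectTaggingInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("photos/2022/cat.jpg").
+//		SetExpectedBucketOwner("111122223333")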
+func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // The bucket name containing the objects to delete. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. 
For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether you want to delete this object even if it has a Governance-type
+	// Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention
+	// permission.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// This checksum algorithm must be the same for all parts and must match the
+	// checksum value supplied in the CreateMultipartUpload request.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// Container for the request.
+	//
+	// Delete is a required field
+	Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device. Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *DeleteObjectsInput) SetChecksumAlgorithm(v string) *DeleteObjectsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
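+//
+// This is internal plumbing for the SDK's access point ARN endpoint handling,
+// not a public API; the caller-visible equivalent is simply supplying an ARN
+// (hypothetical here) as the Bucket value:
+//
+//	input.SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap")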
+func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + // Container element for a successful delete. It identifies the object that + // was successfully deleted. + Deleted []*DeletedObject `type:"list" flattened:"true"` + + // Container for a failed delete action that describes the object that Amazon + // S3 attempted to delete and the error it encountered. + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
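+//
+// The SDK runs this validation before sending the request; it can also be
+// called directly, for example:
+//
+//	if err := input.Validate(); err != nil {
+//		return err
+//	}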
+func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Information about the deleted object. +type DeletedObject struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. + DeleteMarker *bool `type:"boolean"` + + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. + DeleteMarkerVersionId *string `type:"string"` + + // The name of the deleted object. + Key *string `min:"1" type:"string"` + + // The version ID of the deleted object. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) GoString() string {
+	return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+	s.DeleteMarker = &v
+	return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+	s.DeleteMarkerVersionId = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+	s.Key = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+	_ struct{} `type:"structure"`
+
+	// Specify this only in a cross-account scenario (where the source and destination
+	// bucket owners are not the same) when you want to change replica ownership
+	// to the Amazon Web Services account that owns the destination bucket. If this
+	// is not specified in the replication configuration, the replicas are owned
+	// by the same Amazon Web Services account that owns the source object.
+	AccessControlTranslation *AccessControlTranslation `type:"structure"`
+
+	// Destination bucket owner account ID. In a cross-account scenario, if you
+	// direct Amazon S3 to change replica ownership to the Amazon Web Services account
+	// that owns the destination bucket by specifying the AccessControlTranslation
+	// property, this is the account ID of the destination bucket owner. For more
+	// information, see Replication Additional Configuration: Changing the Replica
+	// Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+	// in the Amazon S3 User Guide.
+	Account *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
+	// store the results.
+	//
+	// Bucket is a required field
+	Bucket *string `type:"string" required:"true"`
+
+	// A container that provides information about encryption. If SourceSelectionCriteria
+	// is specified, you must specify this element.
+	EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+	// A container specifying replication metrics-related settings enabling replication
+	// metrics and events.
+	Metrics *Metrics `type:"structure"`
+
+	// A container specifying S3 Replication Time Control (S3 RTC), including whether
+	// S3 RTC is enabled and the time when all objects and operations on objects
+	// must be replicated. Must be specified together with a Metrics block.
+	ReplicationTime *ReplicationTime `type:"structure"`
+
+	// The storage class to use when replicating objects, such as S3 Standard or
+	// reduced redundancy. By default, Amazon S3 uses the storage class of the source
+	// object to create the object replica.
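+	//
+	// A minimal sketch of a destination that overrides the storage class (the
+	// bucket ARN is illustrative):
+	//
+	//	dest := &s3.Destination{
+	//		Bucket:       aws.String("arn:aws:s3:::example-destination-bucket"),
+	//		StorageClass: aws.String(s3.StorageClassStandardIa),
+	//	}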
+ // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon S3 API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Contains the type of server-side encryption used. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (for example, AES256, aws:kms). 
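+	//
+	// A minimal sketch of SSE-KMS encryption for job results (the KMS key ID is
+	// illustrative):
+	//
+	//	enc := &s3.Encryption{
+	//		EncryptionType: aws.String(s3.ServerSideEncryptionAwsKms),
+	//		KMSKeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+	//	}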
+ // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed key to use for encryption of job results. + // Amazon S3 only supports symmetric keys. For more information, see Using symmetric + // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Encryption's + // String and GoString methods. + KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web + // Services KMS key stored in Amazon Web Services Key Management Service (KMS) + // for the destination bucket. Amazon S3 uses this key to encrypt replica objects. + // Amazon S3 only supports symmetric, customer managed KMS keys. For more information, + // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s EncryptionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EncryptionConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+	s.ReplicaKmsKeyID = &v
+	return s
+}
+
+// A message that indicates the request is complete and no more messages will
+// be sent. You should not assume that the request is complete until the client
+// receives an EndEvent.
+type EndEvent struct {
+	_ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) GoString() string {
+	return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	return msg, err
+}
+
+// Container for all error elements.
+type Error struct {
+	_ struct{} `type:"structure"`
+
+	// The error code is a string that uniquely identifies an error condition. It
+	// is meant to be read and understood by programs that detect and handle errors
+	// by type.
+	//
+	// Amazon S3 error codes
+	//
+	//    * Code: AccessDenied Description: Access Denied HTTP Status Code: 403
+	//    Forbidden SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AccountProblem Description: There is a problem with your Amazon
+	//    Web Services account that prevents the action from completing successfully.
+	//    Contact Amazon Web Services Support for further assistance. HTTP Status
+	//    Code: 403 Forbidden SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AllAccessDisabled Description: All access to this Amazon S3 resource
+	//    has been disabled. Contact Amazon Web Services Support for further assistance.
+	//    HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client
+	//
+	//    * Code: AmbiguousGrantByEmailAddress Description: The email address you
+	//    provided is associated with more than one account. 
HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all Amazon + // Web Services Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The Amazon Web Services access + // key ID you provided does not exist in our records. 
HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact Amazon Web Services Support for further assistance. HTTP + // Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. 
HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact Amazon Web Services Support for + // more information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact Amazon Web Services Support for more + // information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. 
HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: Amazon S3 (http://aws.amazon.com/s3) + // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional action + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. 
HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your Amazon Web Services + // secret access key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. 
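+	//
+	// For example, a batch-delete caller might surface both fields when inspecting
+	// DeleteObjectsOutput.Errors (a sketch; out is an assumed response value):
+	//
+	//	for _, e := range out.Errors {
+	//		log.Printf("%s: %s (%s)", aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
+	//	}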
+ Message *string `type:"string"` + + // The version ID of the error. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
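+//
+// The type has no fields; including an empty instance in a bucket's
+// notification configuration is what enables delivery to EventBridge. A
+// minimal sketch, assuming an initialized *s3.S3 client named svc, a
+// hypothetical bucket name, and an SDK version whose
+// NotificationConfiguration carries the EventBridgeConfiguration member:
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			EventBridgeConfiguration: &s3.EventBridgeConfiguration{},
+//		},
+//	})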
+func (s EventBridgeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) GoString() string { + return s.String() +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 User Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. 
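+//
+// A minimal sketch of how these rules are typically assembled into a key
+// filter for a notification configuration ("prefix" and "suffix" are the
+// FilterRuleName values; the prefix string here is hypothetical):
+//
+//	filter := &s3.NotificationConfigurationFilter{
+//		Key: &s3.KeyFilter{
+//			FilterRules: []*s3.FilterRule{
+//				new(s3.FilterRule).SetName("prefix").SetValue("images/"),
+//			},
+//		},
+//	}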
+func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
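+//
+// In ordinary use the input is constructed directly rather than through
+// ARN backfilling. A minimal sketch, assuming an initialized *s3.S3 client
+// named svc and a hypothetical bucket name:
+//
+//	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Status)) // "Enabled" or "Suspended"
+//	}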
+func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
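+//
+// For example, combined with the other setters (a minimal sketch; svc is
+// an assumed initialized *s3.S3 client, and the bucket name and account ID
+// are hypothetical):
+//
+//	out, err := svc.GetBucketAcl(new(s3.GetBucketAclInput).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333"))
+//	if err == nil {
+//		for _, g := range out.Grants {
+//			fmt.Println(g)
+//		}
+//	}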
+func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
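+//
+// The output's configuration is normally obtained from a call such as the
+// following (a minimal sketch; svc, the bucket name, and the configuration
+// ID are assumed or hypothetical):
+//
+//	out, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("analytics-config-1"),
+//	})
+//	if err == nil && out.AnalyticsConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.AnalyticsConfiguration.Id))
+//	}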
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
+	s.AnalyticsConfiguration = v
+	return s
+}
+
+type GetBucketCorsInput struct {
+	_ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
+
+	// The bucket name for which to get the CORS configuration.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *GetBucketCorsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketCorsInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type GetBucketCorsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A set of origins and methods (cross-origin access that you want to allow).
+	// You can add up to 100 rules to the configuration.
+	CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
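+//
+// Validation fails unless both required members are set, as in this
+// minimal sketch (svc, the bucket name, and the configuration ID are
+// assumed or hypothetical):
+//
+//	out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("tiering-config-1"),
+//	})
+//	if err == nil && out.IntelligentTieringConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.IntelligentTieringConfiguration.Id))
+//	}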
+func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
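+//
+// In ordinary use the input is built directly; a minimal sketch (svc, the
+// bucket name, and the inventory ID are assumed or hypothetical):
+//
+//	out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("inventory-config-1"),
+//	})
+//	if err == nil && out.InventoryConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.InventoryConfiguration.Id))
+//	}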
+func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
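+//
+// For example (a minimal sketch; svc is an assumed initialized *s3.S3
+// client and the bucket name is hypothetical):
+//
+//	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		for _, rule := range out.Rules {
+//			fmt.Println(aws.StringValue(rule.ID))
+//		}
+//	}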
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
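+//
+// Because buckets in us-east-1 report an empty LocationConstraint, callers
+// commonly normalize the returned value. A minimal sketch (svc is an
+// assumed initialized *s3.S3 client; the bucket name is hypothetical;
+// NormalizeBucketLocation is provided by this package):
+//
+//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		region := s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+//		fmt.Println(region) // prints "us-east-1" when the constraint is empty
+//	}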
+func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
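+//
+// In ordinary use the input is built directly; a minimal sketch (svc is an
+// assumed initialized *s3.S3 client and the bucket name is hypothetical):
+//
+//	out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.LoggingEnabled != nil {
+//		fmt.Println(aws.StringValue(out.LoggingEnabled.TargetBucket))
+//	}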
+func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
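+//
+// Both Bucket and Id must be set for validation to pass, as in this
+// minimal sketch (svc, the bucket name, and the metrics ID are assumed or
+// hypothetical):
+//
+//	out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("metrics-config-1"),
+//	})
+//	if err == nil && out.MetricsConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.MetricsConfiguration.Id))
+//	}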
+func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the notification configuration. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // currently in effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The bucket name for which to get the bucket policy. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
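+// Generated setters return the receiver, so calls chain; for example (bucket
+// name and account ID are placeholders):
+//
+//	in := new(GetBucketPolicyStatusInput).
+//		SetBucket("my-bucket").
+//		SetExpectedBucketOwner("111122223333")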
+func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // The bucket name for which to get the replication information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
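+// The error returned on failure is a request.ErrInvalidParams, which also
+// satisfies awserr.Error; a sketch of inspecting it:
+//
+//	if err := (&GetBucketReplicationInput{}).Validate(); err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			fmt.Println(aerr.Code(), aerr.Message())
+//		}
+//	}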
+func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + + // The name of the bucket for which to get the payment request configuration + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The name of the bucket for which to get the tagging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. 
+ // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. 
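+// These output setters exist mainly for symmetry; callers usually read the
+// fields from a response instead. A sketch, assuming an *S3 client svc
+// constructed elsewhere with this package's New:
+//
+//	out, err := svc.GetBucketWebsite(&GetBucketWebsiteInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil && out.IndexDocument != nil {
+//		fmt.Println(aws.StringValue(out.IndexDocument.Suffix))
+//	}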
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key of the object for which to get the ACL information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. 
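+// A response-side sketch (svc is an *S3 client constructed elsewhere; the
+// bucket and key are placeholders):
+//
+//	out, err := svc.GetObjectAcl(&GetObjectAclInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("photos/sample.jpg"),
+//	})
+//	if err == nil {
+//		for _, g := range out.Grants {
+//			fmt.Println(aws.StringValue(g.Permission))
+//		}
+//	}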
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectAttributesInput struct { + _ struct{} `locationName:"GetObjectAttributesRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` + + // An XML header that specifies the fields at the root level that you want returned + // in the response. Fields that you do not specify are not returned. + // + // ObjectAttributes is a required field + ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectAttributesInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // The version ID used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAttributesInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.ObjectAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectAttributes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAttributesInput) SetBucket(v string) *GetObjectAttributesInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAttributesInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAttributesInput) SetExpectedBucketOwner(v string) *GetObjectAttributesInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. 
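+// Building a request for selected attributes might look like this (bucket,
+// key, and the attribute names are placeholders; valid attribute names are
+// the ObjectAttributes enum values):
+//
+//	in := &GetObjectAttributesInput{}
+//	in.SetBucket("my-bucket").SetKey("photo.jpg").
+//		SetObjectAttributes([]*string{aws.String("ETag"), aws.String("ObjectSize")})
+//	if err := in.Validate(); err != nil {
+//		// handle missing Bucket, Key, or ObjectAttributes
+//	}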
+func (s *GetObjectAttributesInput) SetKey(v string) *GetObjectAttributesInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesInput) SetMaxParts(v int64) *GetObjectAttributesInput { + s.MaxParts = &v + return s +} + +// SetObjectAttributes sets the ObjectAttributes field's value. +func (s *GetObjectAttributesInput) SetObjectAttributes(v []*string) *GetObjectAttributesInput { + s.ObjectAttributes = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesInput) SetPartNumberMarker(v int64) *GetObjectAttributesInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAttributesInput) SetRequestPayer(v string) *GetObjectAttributesInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectAttributesInput) SetSSECustomerAlgorithm(v string) *GetObjectAttributesInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKey(v string) *GetObjectAttributesInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectAttributesInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKeyMD5(v string) *GetObjectAttributesInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesInput) SetVersionId(v string) *GetObjectAttributesInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAttributesInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAttributesInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAttributesInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAttributesOutput struct { + _ struct{} `type:"structure"` + + // The checksum or digest of the object. + Checksum *Checksum `type:"structure"` + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `type:"string"` + + // The creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A collection of parts associated with a multipart upload. + ObjectParts *GetObjectAttributesParts `type:"structure"` + + // The size of the object in bytes. 
+ ObjectSize *int64 `type:"long"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `type:"string" enum:"StorageClass"` + + // The version ID of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *GetObjectAttributesOutput) SetChecksum(v *Checksum) *GetObjectAttributesOutput { + s.Checksum = v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectAttributesOutput) SetDeleteMarker(v bool) *GetObjectAttributesOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectAttributesOutput) SetETag(v string) *GetObjectAttributesOutput { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectAttributesOutput) SetLastModified(v time.Time) *GetObjectAttributesOutput { + s.LastModified = &v + return s +} + +// SetObjectParts sets the ObjectParts field's value. +func (s *GetObjectAttributesOutput) SetObjectParts(v *GetObjectAttributesParts) *GetObjectAttributesOutput { + s.ObjectParts = v + return s +} + +// SetObjectSize sets the ObjectSize field's value. +func (s *GetObjectAttributesOutput) SetObjectSize(v int64) *GetObjectAttributesOutput { + s.ObjectSize = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAttributesOutput) SetRequestCharged(v string) *GetObjectAttributesOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectAttributesOutput) SetStorageClass(v string) *GetObjectAttributesOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesOutput) SetVersionId(v string) *GetObjectAttributesOutput { + s.VersionId = &v + return s +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + _ struct{} `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number + // of parts exceeds the limit returned in the MaxParts element. + IsTruncated *bool `type:"boolean"` + + // The maximum number of parts allowed in the response. 
+ MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the PartNumberMarker request parameter in + // a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // The marker for the current part. + PartNumberMarker *int64 `type:"integer"` + + // A container for elements related to a particular part. A response can contain + // zero or more Parts elements. + Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"` + + // The total number of parts. + TotalPartsCount *int64 `locationName:"PartsCount" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesParts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesParts) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetObjectAttributesParts) SetIsTruncated(v bool) *GetObjectAttributesParts { + s.IsTruncated = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesParts) SetMaxParts(v int64) *GetObjectAttributesParts { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *GetObjectAttributesParts) SetNextPartNumberMarker(v int64) *GetObjectAttributesParts { + s.NextPartNumberMarker = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesParts) SetPartNumberMarker(v int64) *GetObjectAttributesParts { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *GetObjectAttributesParts) SetParts(v []*ObjectPart) *GetObjectAttributesParts { + s.Parts = v + return s +} + +// SetTotalPartsCount sets the TotalPartsCount field's value. +func (s *GetObjectAttributesParts) SetTotalPartsCount(v int64) *GetObjectAttributesParts { + s.TotalPartsCount = &v + return s +} + +type GetObjectInput struct { + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + + // The bucket name containing the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When
+ // using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ // you provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // To retrieve the checksum, this mode must be enabled.
+ //
+ // The AWS SDK for Go v1 does not support automatic response payload checksum
+ // validation. This feature is available in the AWS SDK for Go v2.
+ ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified;
+ // otherwise, return a 412 (precondition failed) error.
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time;
+ // otherwise, return a 304 (not modified) error.
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified; otherwise, return a 304 (not modified) error.
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time;
+ // otherwise, return a 412 (precondition failed) error.
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key of the object to get.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when decrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
+ // the data. This value is used to decrypt the object when recovering it and
+ // must match the one used when storing the data. The key must be appropriate
+ // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by GetObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
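+
+// Editor's note: an illustrative sketch (not generated code) of the response-*
+// override parameters defined above. They alter only the headers of this one
+// response; the stored object metadata is unchanged. The client setup, bucket,
+// and key are placeholders.
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket:                     aws.String("example-bucket"),
+//		Key:                        aws.String("report.bin"),
+//		ResponseContentType:        aws.String("application/octet-stream"),
+//		ResponseContentDisposition: aws.String(`attachment; filename="report.bin"`),
+//	})
+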
+func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumMode sets the ChecksumMode field's value. +func (s *GetObjectInput) SetChecksumMode(v string) *GetObjectInput { + s.ChecksumMode = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. 
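+
+// Editor's note: an illustrative sketch (not generated code) of the fluent
+// setters above, issuing a ranged GET for the first KiB of an object. The
+// client and names are placeholders.
+//
+//	in := (&s3.GetObjectInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example-key").
+//		SetRange("bytes=0-1023") // inclusive byte range, per the HTTP Range header
+//	out, err := svc.GetObject(in)
+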
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket name containing the object whose legal hold status you want to + // retrieve. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose legal hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose legal hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
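+
+// Editor's note: an illustrative sketch (not generated code) of reading an
+// object's legal hold status with the input type above; the client, bucket,
+// and key are placeholders.
+//
+//	hold, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
+//		Bucket: aws.String("example-locked-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil && hold.LegalHold != nil {
+//		fmt.Println("status:", aws.StringValue(hold.LegalHold.Status)) // "ON" or "OFF"
+//	}
+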
+func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current legal hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose Object Lock configuration you want to retrieve. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
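+
+// Editor's note: an illustrative sketch (not generated code) of retrieving a
+// bucket's Object Lock configuration with the input type above; the client
+// and bucket name are placeholders.
+//
+//	cfg, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
+//		Bucket: aws.String("example-locked-bucket"),
+//	})
+//	if err == nil && cfg.ObjectLockConfiguration != nil {
+//		fmt.Println(aws.StringValue(cfg.ObjectLockConfiguration.ObjectLockEnabled))
+//	}
+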
+func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. 
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to
+ // a specific version of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL-encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized format:
+ // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Indicates whether this object has an active legal hold. This field is only
+ // returned if you have permission to view an object's legal hold status.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode currently in place for this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when this object's Object Lock will expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this if your request involves a bucket that is either
+ // a source or destination in a replication rule.
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about the object restoration action and the expiration
+ // time of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for
+ // the object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by GetObjectOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The number of tags, if any, on the object.
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
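+
+// Editor's note: an illustrative sketch (not generated code) of consuming a
+// GetObjectOutput. Body is a streaming io.ReadCloser owned by the caller, so
+// it must be drained and closed. Assumes Go 1.16+ for io.ReadAll; the client,
+// bucket, and key are placeholders.
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer out.Body.Close()
+//	data, err := io.ReadAll(out.Body)
+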
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *GetObjectOutput) SetChecksumCRC32(v string) *GetObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *GetObjectOutput) SetChecksumCRC32C(v string) *GetObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *GetObjectOutput) SetChecksumSHA1(v string) *GetObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *GetObjectOutput) SetChecksumSHA256(v string) *GetObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. 
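+
+// Editor's note: an illustrative sketch (not generated code) of the
+// aws.Config.LowerCaseHeaderMaps option referenced by the Metadata field's
+// documentation above, which writes unmarshaled x-amz-meta-* keys to the map
+// as lowercase instead of the canonicalized default.
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		LowerCaseHeaderMaps: aws.Bool(true),
+//	}))
+//	svc := s3.New(sess)
+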
+func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
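+
+// Editor's note: an illustrative sketch (not generated code) of reading an
+// object's retention settings with the input type above; the client, bucket,
+// and key are placeholders.
+//
+//	ret, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
+//		Bucket: aws.String("example-locked-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil && ret.Retention != nil {
+//		fmt.Println(aws.StringValue(ret.Retention.Mode),
+//			aws.TimeValue(ret.Retention.RetainUntilDate))
+//	}
+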
+func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The versionId of the object for which to get the tagging information. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
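+
+// Editor's note: an illustrative sketch (not generated code) of listing an
+// object's tag set with the input type above; the client, bucket, and key
+// are placeholders.
+//
+//	tags, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil {
+//		for _, t := range tags.TagSet {
+//			fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//		}
+//	}
+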
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // The name of the bucket containing the object for which to get the torrent + // files. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key for which to get the information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
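+ //
+ // As a sketch (hypothetical names; RequestPayerRequester is the enum value
+ // defined by this package), a requester pays torrent request looks like:
+ //
+ //	in := &GetObjectTorrentInput{
+ //		Bucket:       aws.String("example-bucket"),
+ //		Key:          aws.String("example-key"),
+ //		RequestPayer: aws.String(RequestPayerRequester),
+ //	}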
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
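+//
+// A sketch of pinning the expected owner (111122223333 is the standard
+// placeholder account ID, not a real account); the request then fails with
+// 403 Forbidden if the bucket is owned by anyone else:
+//
+//	in := &GetPublicAccessBlockInput{Bucket: aws.String("example-bucket")}
+//	in.SetExpectedBucketOwner("111122223333")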
+func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
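+//
+// Validate runs client-side, before any request is sent. A sketch of catching
+// the missing required Tier field locally (TierStandard is one of the Tier
+// enum values defined by this package):
+//
+//	p := &GlacierJobParameters{}
+//	if err := p.Validate(); err != nil {
+//		// err reports that Tier is a required field
+//	}
+//	p.SetTier(TierStandard) // p now validates cleanly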
+func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // The name of the bucket containing the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ // you provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // To retrieve the checksum, this parameter must be enabled.
+ //
+ // In addition, if you enable ChecksumMode and the object is encrypted with
+ // Amazon Web Services Key Management Service (Amazon Web Services KMS), you
+ // must have permission to use the kms:Decrypt action for the request to succeed.
+ ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified;
+ // otherwise, return a 412 (precondition failed) error.
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time;
+ // otherwise, return a 304 (not modified) error.
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified; otherwise, return a 304 (not modified) error.
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time;
+ // otherwise, return a 412 (precondition failed) error.
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Because HeadObject returns only the metadata for an object, this parameter
+ // has no effect.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by HeadObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumMode sets the ChecksumMode field's value.
+func (s *HeadObjectInput) SetChecksumMode(v string) *HeadObjectInput {
+ s.ChecksumMode = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
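+//
+// A conditional HEAD sketch (hypothetical bucket, key, and ETag): the request
+// fails with 412 Precondition Failed unless the stored object still carries
+// the given ETag.
+//
+//	in := &HeadObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	}
+//	in.SetIfMatch(`"9b2cf535f27731c974343645a3985328"`)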
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). 
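+ //
+ // When reading this from a response, a nil-safe sketch (out is a hypothetical
+ // *HeadObjectOutput) is:
+ //
+ //	if aws.BoolValue(out.BucketKeyEnabled) {
+ //		// the object is encrypted with an S3 Bucket Key
+ //	}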
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. 
If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to
+ // a specific version of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL-encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized format:
+ // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Specifies whether a legal hold is in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectLegalHold permission.
+ // This header is not returned if the specified version of this object has never
+ // had a legal hold applied. For more information about S3 Object Lock, see
+ // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode, if any, that's in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when the Object Lock retention period expires. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication
+ // and destination bucket or buckets where Amazon S3 stores object replicas.
+ // When you request an object (GetObject) or object metadata (HeadObject) from
+ // these buckets, Amazon S3 will return the x-amz-replication-status header
+ // in the response as follows:
+ //
+ // * If requesting an object from the source bucket, Amazon S3 will return
+ // the x-amz-replication-status header if the object in your request is eligible
+ // for replication. For example, suppose that in your replication configuration,
+ // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects
+ // with key prefix TaxDocs. Any objects you upload with this key name prefix,
+ // for example TaxDocs/document1.pdf, are eligible for replication. For any
+ // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED or FAILED indicating object replication
+ // status.
+ //
+ // * If requesting an object from a destination bucket, Amazon S3 will return
+ // the x-amz-replication-status header with value REPLICA if the object in
+ // your request is a replica that Amazon S3 created and there is no replica
+ // modification replication in progress.
+ //
+ // * When replicating objects to multiple destination buckets, the x-amz-replication-status
+ // header acts differently. The header of the source object will only return
+ // a value of COMPLETED when replication is successful to all destinations.
+ // The header will remain at value PENDING until replication has completed
+ // for all destinations. If replication fails for one or more destinations,
+ // the header will return FAILED.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If the object is stored using server-side encryption either with an Amazon + // Web Services KMS key or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. 
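+//
+// Output fields like this one are normally read, not set; a nil-safe reading
+// sketch with the aws value helpers (svc and in are hypothetical):
+//
+//	out, err := svc.HeadObject(in)
+//	if err == nil {
+//		size := aws.Int64Value(out.ContentLength)
+//		etag := aws.StringValue(out.ETag)
+//		_, _ = size, etag
+//	}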
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *HeadObjectOutput) SetChecksumCRC32(v string) *HeadObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *HeadObjectOutput) SetChecksumCRC32C(v string) *HeadObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *HeadObjectOutput) SetChecksumSHA1(v string) *HeadObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *HeadObjectOutput) SetChecksumSHA256(v string) *HeadObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. 
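+//
+// PartsCount is only populated when the request specified partNumber. A common
+// sketch (hypothetical names) for discovering how many parts a multipart
+// object has is a HEAD for part 1:
+//
+//	out, err := svc.HeadObject(&HeadObjectInput{
+//		Bucket:     aws.String("example-bucket"),
+//		Key:        aws.String("example-key"),
+//		PartNumber: aws.Int64(1),
+//	})
+//	// on success, aws.Int64Value(out.PartsCount) is the total number of parts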
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request
+ // to samplebucket/images/, the data that is returned will be for the object
+ // with the key name images/index.html). The suffix must not be empty and must
+ // not include a slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
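+//
+// IndexDocument is typically embedded in a website configuration. A sketch of
+// the usual shape (the suffix value is illustrative):
+//
+//	cfg := &WebsiteConfiguration{
+//		IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//	}
+//	// cfg is then passed to PutBucketWebsite via PutBucketWebsiteInput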
+func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Container element that identifies who initiated the multipart upload. +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an Amazon Web Services account, it provides the Canonical + // User ID. If the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. 
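+//
+// A sketch of an InputSerialization for a SelectObjectContent query over a
+// gzipped CSV object with a header row (CompressionTypeGzip and
+// FileHeaderInfoUse are enum values defined by this package):
+//
+//	in := &InputSerialization{}
+//	in.SetCompressionType(CompressionTypeGzip)
+//	in.SetCSV((&CSVInput{}).SetFileHeaderInfo(FileHeaderInfoUse))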
+func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
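+//
+// Illustrative sketch (editorial example; prefix and tag values are
+// hypothetical) of a filter built from an And conjunction:
+//
+//	filter := (&IntelligentTieringFilter{}).SetAnd(
+//		(&IntelligentTieringAndOperator{}).
+//			SetPrefix("logs/").
+//			SetTags([]*Tag{(&Tag{}).SetKey("team").SetValue("data")}))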
+func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list" enum:"InventoryOptionalField"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + AccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
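+// Bucket and Format are required, and a nested Encryption value, when set,
+// is validated recursively. Illustrative sketch (editorial example; the
+// bucket ARN is hypothetical):
+//
+//	dest := (&InventoryS3BucketDestination{}).
+//		SetBucket("arn:aws:s3:::example-inventory-bucket").
+//		SetFormat("CSV")
+//	err := dest.Validate() // nil once the required fields are set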
+func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Specifies JSON as object's input serialization format. +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
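+//
+// Illustrative sketch (editorial example) selecting line-delimited JSON as
+// the input format; "LINES" is the JSONType enum value for newline-delimited
+// records:
+//
+//	in := (&InputSerialization{}).
+//		SetJSON((&JSONInput{}).SetType("LINES"))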
+func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +// Specifies JSON as request's output serialization format. +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key-value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the Lambda function. For more + // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. 
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes + // when the specified event type occurs. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Container for lifecycle rules. You can add as many as 1000 rules. +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
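+// Rules is required and every rule in the list is validated in turn.
+// Illustrative sketch (editorial example; the rule contents are
+// hypothetical):
+//
+//	cfg := (&LifecycleConfiguration{}).SetRules([]*Rule{
+//		(&Rule{}).SetPrefix("tmp/").SetStatus("Enabled"),
+//	})
+//	err := cfg.Validate()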
+func (s *LifecycleConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+	s.Rules = v
+	return s
+}
+
+// Container for the expiration for the lifecycle of the object.
+type LifecycleExpiration struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates at what date the object is to be moved or deleted. Should be in
+	// GMT ISO 8601 Format.
+	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Indicates the lifetime, in days, of the objects that are subject to the rule.
+	// The value must be a non-zero positive integer.
+	Days *int64 `type:"integer"`
+
+	// Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+	// versions. If set to true, the delete marker will be expired; if set to false,
+	// the policy takes no action. This cannot be specified with Days or Date in
+	// a Lifecycle Expiration Policy.
+	ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleExpiration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleExpiration) GoString() string {
+	return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+	s.Date = &v
+	return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+	s.Days = &v
+	return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+	s.ExpiredObjectDeleteMarker = &v
+	return s
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+type LifecycleRule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the days since the initiation of an incomplete multipart upload
+	// that Amazon S3 will wait before permanently removing all parts of the upload.
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+	// in the Amazon S3 User Guide.
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+	// Specifies the expiration for the lifecycle of the object in the form of date,
+	// days, and whether the object has a delete marker.
+ Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is + // required if the LifecycleRule does not contain a Prefix element. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // no longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
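+// Status is the only required field; a Filter, when present, must itself
+// be valid. Illustrative sketch (editorial example; the ID, status, and
+// prefix values are hypothetical):
+//
+//	rule := (&LifecycleRule{}).
+//		SetID("expire-old-logs").
+//		SetStatus("Enabled").
+//		SetFilter((&LifecycleRuleFilter{}).SetPrefix("logs/"))
+//	err := rule.Validate()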
+func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
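+//
+// Illustrative sketch (editorial example; all values are hypothetical) of
+// an And operator combining a prefix with object size bounds:
+//
+//	and := (&LifecycleRuleAndOperator{}).
+//		SetPrefix("archive/").
+//		SetObjectSizeGreaterThan(1024).
+//		SetObjectSizeLessThan(10 * 1024 * 1024)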
+func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeGreaterThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeLessThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
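+// Only the nested And and Tag values are checked here; the requirement that
+// exactly one of Prefix, Tag, or And be set is enforced by the service
+// rather than by this client-side check. Illustrative sketch (editorial
+// example; the tag values are hypothetical):
+//
+//	f := (&LifecycleRuleFilter{}).SetTag((&Tag{}).SetKey("retain").SetValue("no"))
+//	err := f.Validate()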
+func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeGreaterThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeLessThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
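+//
+// Illustrative sketch (editorial example; the bucket name is hypothetical)
+// of a populated request; ContinuationToken is only needed when resuming a
+// truncated listing:
+//
+//	input := (&ListBucketAnalyticsConfigurationsInput{}).
+//		SetBucket("example-bucket")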
+func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. 
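+//
+// Truncated listings are resumed by passing NextContinuationToken back as
+// the next request's ContinuationToken. Illustrative sketch (editorial
+// example; svc is an assumed *S3 client and the bucket name is
+// hypothetical):
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListBucketAnalyticsConfigurations(
+//			&ListBucketAnalyticsConfigurationsInput{
+//				Bucket:            aws.String("example-bucket"),
+//				ContinuationToken: token,
+//			})
+//		if err != nil || out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}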
+func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. 
+func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
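+//
+// For example (a sketch; the account ID "111122223333" is hypothetical), the
+// request below fails with 403 Forbidden if "example-bucket" is owned by a
+// different account:
+//
+//	out, err := svc.ListBucketInventoryConfigurations(
+//		&s3.ListBucketInventoryConfigurationsInput{
+//			Bucket:              aws.String("example-bucket"),
+//			ExpectedBucketOwner: aws.String("111122223333"),
+//		})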
+func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. 
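+//
+// A sketch of consuming one page and deciding whether to continue (svc and
+// "example-bucket" are assumed as above):
+//
+//	out, err := svc.ListBucketInventoryConfigurations(
+//		&s3.ListBucketInventoryConfigurationsInput{Bucket: aws.String("example-bucket")})
+//	if err == nil {
+//		for _, cfg := range out.InventoryConfigurationList {
+//			fmt.Println(aws.StringValue(cfg.Id))
+//		}
+//		if aws.BoolValue(out.IsTruncated) {
+//			// issue the request again with ContinuationToken set to
+//			// out.NextContinuationToken
+//		}
+//	}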
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
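+//
+// A fluent-setter sketch for this input (svc, the bucket, and the account ID
+// are hypothetical):
+//
+//	input := (&s3.ListBucketMetricsConfigurationsInput{}).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333")
+//	out, err := svc.ListBucketMetricsConfigurations(input)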
+func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. 
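+//
+// A sketch of reading the returned list (out is a hypothetical
+// *s3.ListBucketMetricsConfigurationsOutput from the call above):
+//
+//	for _, mc := range out.MetricsConfigurationList {
+//		fmt.Println(aws.StringValue(mc.Id))
+//	}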
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. 
For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
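+//
+// On the caller's side this means an access point ARN can be passed directly
+// in place of the bucket name; a hedged sketch with a hypothetical ARN:
+//
+//	input := &s3.ListMultipartUploadsInput{
+//		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
+//	}
+//	out, err := svc.ListMultipartUploads(input)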
+func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. 
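+//
+// A paging sketch over this output using the SDK's built-in paginator, which
+// feeds NextKeyMarker and NextUploadIdMarker back automatically (svc and
+// "example-bucket" are assumed):
+//
+//	err := svc.ListMultipartUploadsPages(
+//		&s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")},
+//		func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//			for _, u := range page.Uploads {
+//				fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//			}
+//			return true // keep paging
+//		})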
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // The bucket name that contains the objects. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more. If additional keys satisfy the search criteria,
+ // but were not returned because max-keys was exceeded, the response contains
+ // IsTruncated set to true. To return the additional keys, see key-marker and
+ // version-id-marker.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings
+ // of keys. (You can think of using prefix to make groups in the same way you'd
+ // use a folder in a file system.) You can use prefix with delimiter to roll
+ // up numerous objects into a single result under CommonPrefixes.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectVersionsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
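+//
+// A minimal call sketch for this input (svc, "example-bucket", and the
+// "photos/" prefix are hypothetical):
+//
+//	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
+//		Bucket:  aws.String("example-bucket"),
+//		Prefix:  aws.String("photos/"),
+//		MaxKeys: aws.Int64(100),
+//	})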
+func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. 
+ Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. + Prefix *string `type:"string"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
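+//
+// A sketch of manual pagination with the markers described above (svc and
+// "example-bucket" are assumed):
+//
+//	in := &s3.ListObjectVersionsInput{Bucket: aws.String("example-bucket")}
+//	for {
+//		out, err := svc.ListObjectVersions(in)
+//		if err != nil {
+//			return err
+//		}
+//		// ... consume out.Versions and out.DeleteMarkers ...
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.VersionIdMarker = out.NextVersionIdMarker
+//	}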
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // All of the keys (up to 1,000) rolled up in a common prefix count as a single
+ // return when calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // Indicates where in the bucket listing begins. Marker is included in the response
+ // if it was sent with the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of keys returned in the response body.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in the subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: This element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include the NextMarker and it
+ // is truncated, you can use the value of the last Key in the response as the
+ // marker in the subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
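+//
+// A sketch of the NextMarker fallback described above (out is a hypothetical
+// *s3.ListObjectsOutput from a prior ListObjects call); NextMarker is only
+// returned when a delimiter was specified, so a truncated listing may have to
+// fall back to the last returned key:
+//
+//	marker := out.NextMarker
+//	if marker == nil && aws.BoolValue(out.IsTruncated) && len(out.Contents) > 0 {
+//		marker = out.Contents[len(out.Contents)-1].Key
+//	}
+//	// pass marker as Marker in the next ListObjectsInput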
+func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Bucket name to list. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. 
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The owner field is not present in ListObjectsV2 responses by default. If
+ // you want the owner field returned with each key in the result, set the
+ // fetch-owner request parameter to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectsV2Input) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
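+//
+// A paging sketch with the V2 API (svc is an assumed *s3.S3 client; the
+// bucket, prefix, and printed field are illustrative):
+//
+//	err := svc.ListObjectsV2Pages(
+//		&s3.ListObjectsV2Input{
+//			Bucket:     aws.String("example-bucket"),
+//			Prefix:     aws.String("logs/"),
+//			FetchOwner: aws.Bool(true),
+//		},
+//		func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//			fmt.Println(aws.Int64Value(page.KeyCount))
+//			return true // keep paging until the token is exhausted
+//		})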
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // If ContinuationToken was sent with the request, it is included in the response. + ContinuationToken *string `type:"string"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. 
Each rolled-up result counts as only one return against
+	// the MaxKeys value.
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object key names in the XML response.
+	//
+	// If you specify the encoding-type request parameter, Amazon S3 includes this
+	// element in the response, and returns encoded key name values in the following
+	// response elements:
+	//
+	// Delimiter, Prefix, Key, and StartAfter.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// Set to false if all of the results were returned. Set to true if more keys
+	// are available to return. If the number of results exceeds that specified
+	// by MaxKeys, all of the results might not be returned.
+	IsTruncated *bool `type:"boolean"`
+
+	// KeyCount is the number of keys returned with this request. KeyCount will
+	// always be less than or equal to the MaxKeys field. For example, if you ask
+	// for 50 keys, your result will include 50 keys or fewer.
+	KeyCount *int64 `type:"integer"`
+
+	// Sets the maximum number of keys returned in the response. By default the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys *int64 `type:"integer"`
+
+	// The bucket name.
+	//
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name. For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	Name *string `type:"string"`
+
+	// NextContinuationToken is sent when isTruncated is true, which means there
+	// are more keys in the bucket that can be listed. The next list request to
+	// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+	// is obfuscated and is not a real key.
+	NextContinuationToken *string `type:"string"`
+
+	// Keys that begin with the indicated prefix.
+	Prefix *string `type:"string"`
+
+	// If StartAfter was sent with the request, it is included in the response.
+	StartAfter *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Output) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output.
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListPartsInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *ListPartsInput) SetSSECustomerAlgorithm(v string) *ListPartsInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *ListPartsInput) SetSSECustomerKey(v string) *ListPartsInput { + s.SSECustomerKey = &v + return s +} + +func (s *ListPartsInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *ListPartsInput) SetSSECustomerKeyMD5(v string) *ListPartsInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s ListPartsInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type ListPartsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the bucket has a lifecycle rule configured with an action to abort incomplete
+	// multipart uploads and the prefix in the lifecycle rule matches the object
+	// name in the request, then the response includes this header indicating when
+	// the initiated multipart upload will become eligible for an abort operation.
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+	//
+	// The response will also include the x-amz-abort-rule-id header that will provide
+	// the ID of the lifecycle configuration rule that defines this action.
+	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+	// This header is returned along with the x-amz-abort-date header. It identifies
+	// the applicable lifecycle configuration rule that defines the action to abort
+	// incomplete multipart uploads.
+	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+	// The name of the bucket to which the multipart upload was initiated. Does
+	// not return the access point ARN or access point alias if used.
+	Bucket *string `type:"string"`
+
+	// The algorithm that was used to create a checksum of the object.
+	ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"`
+
+	// Container element that identifies who initiated the multipart upload. If
+	// the initiator is an Amazon Web Services account, this element provides the
+	// same information as the Owner element. If the initiator is an IAM User, this
+	// element provides the user ARN and display name.
+	Initiator *Initiator `type:"structure"`
+
+	// Indicates whether the returned list of parts is truncated. A true value indicates
+	// that the list was truncated. A list can be truncated if the number of parts
+	// exceeds the limit returned in the MaxParts element.
+	IsTruncated *bool `type:"boolean"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `min:"1" type:"string"`
+
+	// Maximum number of parts that were allowed in the response.
+	MaxParts *int64 `type:"integer"`
+
+	// When a list is truncated, this element specifies the last part in the list,
+	// as well as the value to use for the part-number-marker request parameter
+	// in a subsequent request.
+	NextPartNumberMarker *int64 `type:"integer"`
+
+	// Container element that identifies the object owner, after the object is created.
+	// If the multipart upload is initiated by an IAM user, this element provides
+	// the parent account ID and display name.
+	Owner *Owner `type:"structure"`
+
+	// When a list is truncated, this element specifies the last part in the list,
+	// as well as the value to use for the part-number-marker request parameter
+	// in a subsequent request.
+	PartNumberMarker *int64 `type:"integer"`
+
+	// Container for elements related to a particular part. A response can contain
+	// zero or more Part elements.
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ListPartsOutput) SetChecksumAlgorithm(v string) *ListPartsOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
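+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. The PartNumberMarker / NextPartNumberMarker pair documented above
+// pages through parts the same way ContinuationToken pages through keys; svc
+// and uploadID are assumed.
+//
+//	input := &ListPartsInput{}
+//	input.SetBucket("example-bucket").SetKey("big-object").SetUploadId(uploadID)
+//	for {
+//	    page, err := svc.ListParts(input)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    for _, p := range page.Parts {
+//	        fmt.Printf("part %d: %d bytes\n", *p.PartNumber, *p.Size)
+//	    }
+//	    if page.IsTruncated == nil || !*page.IsTruncated {
+//	        break
+//	    }
+//	    input.SetPartNumberMarker(*page.NextPartNumberMarker)
+//	}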
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Contains the type of server-side encryption used. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. 
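+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. A Location is typically supplied as the S3 member of a restore
+// request's OutputLocation; the names below are placeholders. Validate
+// enforces the two required fields, BucketName and Prefix.
+//
+//	loc := &Location{}
+//	loc.SetBucketName("results-bucket")
+//	loc.SetPrefix("restore-output/")
+//	loc.SetCannedACL("bucket-owner-full-control")
+//	if err := loc.Validate(); err != nil {
+//	    return err
+//	}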
+func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon S3 API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see Permissions for server access + // log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
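+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. Both TargetBucket and TargetPrefix are required, so Validate
+// fails if either is missing; the bucket names are placeholders.
+//
+//	le := &LoggingEnabled{}
+//	le.SetTargetBucket("log-bucket")
+//	le.SetTargetPrefix("access-logs/source-bucket/")
+//	if err := le.Validate(); err != nil {
+//	    return err
+//	}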
+func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + // Name of the Object. + Name *string `type:"string"` + + // Value of the Object. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + EventThreshold *ReplicationTimeValue `type:"structure"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
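+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. Status is the only required field of Metrics; the optional
+// EventThreshold (a ReplicationTimeValue, assumed to carry Minutes) sets the
+// threshold for the missed-replication event.
+//
+//	m := &Metrics{}
+//	m.SetStatus("Enabled")
+//	m.SetEventThreshold(&ReplicationTimeValue{Minutes: aws.Int64(15)})
+//	if err := m.Validate(); err != nil {
+//	    return err
+//	}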
+func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string `type:"string"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator { + s.AccessPointArn = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. 
The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). +// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating a metrics filter. + AccessPointArn *string `type:"string"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
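+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. A MetricsFilter carries exactly one predicate; combining a prefix
+// with a tag requires the And form, as the doc comment above describes. The
+// tag key/value are placeholders.
+//
+//	filter := &MetricsFilter{}
+//	filter.SetAnd((&MetricsAndOperator{}).
+//	    SetPrefix("photos/").
+//	    SetTags([]*Tag{{Key: aws.String("class"), Value: aws.String("archive")}}))
+//	cfg := (&MetricsConfiguration{}).SetId("photos-archive").SetFilter(filter)
+//	if err := cfg.Validate(); err != nil {
+//	    return err
+//	}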
+func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter { + s.AccessPointArn = &v + return s +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *MultipartUpload) SetChecksumAlgorithm(v string) *MultipartUpload { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. 
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies how many noncurrent versions Amazon S3 will retain. If there are + // this many more recent noncurrent versions, Amazon S3 will take the associated + // action. For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionExpiration) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionExpiration { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon +// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at +// a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies how many noncurrent versions Amazon S3 will retain. 
If there are + // this many more recent noncurrent versions, Amazon S3 will take the associated + // action. For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionTransition) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionTransition { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration `type:"structure"` + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
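+// Editor's note: an illustrative sketch, not part of the generated SDK
+// surface. It wires a single SQS target for object-created events; the queue
+// ARN is a placeholder, and QueueConfiguration's setters are assumed from the
+// same generated file.
+//
+//	nc := &NotificationConfiguration{}
+//	nc.SetQueueConfigurations([]*QueueConfiguration{
+//	    (&QueueConfiguration{}).
+//	        SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue").
+//	        SetEvents([]*string{aws.String("s3:ObjectCreated:*")}),
+//	})
+//	if err := nc.Validate(); err != nil {
+//	    return err
+//	}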
+func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventBridgeConfiguration sets the EventBridgeConfiguration field's value. +func (s *NotificationConfiguration) SetEventBridgeConfiguration(v *EventBridgeConfiguration) *NotificationConfiguration { + s.EventBridgeConfiguration = v + return s +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Container for specifying the Lambda notification configuration. + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
+	s.CloudFunctionConfiguration = v
+	return s
+}
+
+// SetQueueConfiguration sets the QueueConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
+	s.QueueConfiguration = v
+	return s
+}
+
+// SetTopicConfiguration sets the TopicConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
+	s.TopicConfiguration = v
+	return s
+}
+
+// Specifies object key name filtering rules. For information about key name
+// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide.
+type NotificationConfigurationFilter struct {
+	_ struct{} `type:"structure"`
+
+	// A container for object key name prefix and suffix filtering rules.
+	Key *KeyFilter `locationName:"S3Key" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationConfigurationFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NotificationConfigurationFilter) GoString() string {
+	return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
+	s.Key = v
+	return s
+}
+
+// An object consists of data and its descriptive metadata.
+type Object struct {
+	_ struct{} `type:"structure"`
+
+	// The algorithm that was used to create a checksum of the object.
+	ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"`
+
+	// The entity tag is a hash of the object. The ETag reflects changes only to
+	// the contents of an object, not its metadata. The ETag may or may not be an
+	// MD5 digest of the object data. Whether or not it is depends on how the object
+	// was created and how it is encrypted as described below:
+	//
+	//    * Objects created by the PUT Object, POST Object, or Copy operation, or
+	//    through the Amazon Web Services Management Console, that are encrypted
+	//    by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object
+	//    data.
+ //
+ // * Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the Amazon Web Services Management Console, and are encrypted
+ // by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object
+ // data.
+ //
+ // * If an object is created by either the Multipart Upload or Part Copy
+ // operation, the ETag is not an MD5 digest, regardless of the method of
+ // encryption. If an object is larger than 16 MB, the Amazon Web Services
+ // Management Console will upload or copy that object as a Multipart Upload,
+ // and therefore the ETag will not be an MD5 digest.
+ ETag *string `type:"string"`
+
+ // The name that you assign to an object. You use the object key to retrieve
+ // the object.
+ Key *string `min:"1" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `type:"timestamp"`
+
+ // The owner of the object.
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) GoString() string {
+ return s.String()
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *Object) SetChecksumAlgorithm(v []*string) *Object {
+ s.ChecksumAlgorithm = v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+// Object Identifier is a unique value to identify objects.
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether this bucket has an Object Lock configuration enabled. Enable
+ // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+ ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
+
+ // Specifies the Object Lock rule for the specified object. Enable this rule
+ // when you apply ObjectLockConfiguration to a bucket. Bucket settings require
+ // both a mode and a period. The period can be either Days or Years but you
+ // must select one. You cannot specify Days and Years at the same time.
+ Rule *ObjectLockRule `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectLockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectLockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetObjectLockEnabled sets the ObjectLockEnabled field's value.
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration {
+ s.ObjectLockEnabled = &v
+ return s
+}
+
+// SetRule sets the Rule field's value.
+func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration {
+ s.Rule = v
+ return s
+}
+
+// A legal hold configuration for an object.
+type ObjectLockLegalHold struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the specified object has a legal hold in place.
+ Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"`
+}
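+
+// exampleObjectLockConfiguration is an editor's illustrative sketch, not part
+// of the generated API: it assembles the ObjectLockConfiguration documented
+// above with the fluent setters defined in this file. The empty
+// DefaultRetention literal is a placeholder (its mode and Days/Years period
+// members are defined elsewhere in this package); real usage would populate a
+// mode plus exactly one period, per the doc comment above. "Enabled" is
+// assumed here to be the ObjectLockEnabled enum value.
+func exampleObjectLockConfiguration() *ObjectLockConfiguration {
+ rule := (&ObjectLockRule{}).SetDefaultRetention(&DefaultRetention{})
+ return (&ObjectLockConfiguration{}).SetObjectLockEnabled("Enabled").SetRule(rule)
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".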
+func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default Object Lock retention mode and period that you want to apply + // to new objects placed in the specified bucket. Bucket settings require both + // a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// A container for elements related to an individual part. +type ObjectPart struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. 
This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` + + // The size of the uploaded part in bytes. + Size *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *ObjectPart) SetChecksumCRC32(v string) *ObjectPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *ObjectPart) SetChecksumCRC32C(v string) *ObjectPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *ObjectPart) SetChecksumSHA1(v string) *ObjectPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *ObjectPart) SetChecksumSHA256(v string) *ObjectPart { + s.ChecksumSHA256 = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *ObjectPart) SetPartNumber(v int64) *ObjectPart { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. 
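+
+// examplePartIntegrityCheck is an editor's illustrative sketch, not generated
+// code: given an ObjectPart returned by S3 and a base64-encoded SHA-256 digest
+// computed locally over the same part data, it reports whether the two match,
+// which is the data-integrity comparison the checksum members above exist for.
+func examplePartIntegrityCheck(remote *ObjectPart, localSHA256 string) bool {
+ return remote != nil && remote.ChecksumSHA256 != nil && *remote.ChecksumSHA256 == localSHA256
+}
+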
+func (s *ObjectPart) SetSize(v int64) *ObjectPart { + s.Size = &v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ObjectVersion) SetChecksumAlgorithm(v []*string) *ObjectVersion { + s.ChecksumAlgorithm = v + return s +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. + DisplayName *string `type:"string"` + + // Container for the ID of the owner. + ID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// The container element for a bucket's ownership controls. 
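+
+// exampleSelectOutput is an editor's illustrative sketch, not generated code:
+// it selects JSON serialization for Select results via the setters defined
+// above. The empty JSONOutput literal is a placeholder (its options are
+// defined elsewhere in this package); CSV output would use SetCSV with a
+// CSVOutput value instead.
+func exampleSelectOutput() *OutputSerialization {
+ return (&OutputSerialization{}).SetJSON(&JSONOutput{})
+}
+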
+type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control + // canned ACL or an equivalent form of this ACL expressed in the XML format. + // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
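+
+// exampleOwnershipControls is an editor's illustrative sketch, not generated
+// code: it builds the OwnershipControls documented above with a single
+// BucketOwnerEnforced rule (one of the three ObjectOwnership values listed in
+// the rule's doc comment) and runs the client-side Validate check.
+func exampleOwnershipControls() (*OwnershipControls, error) {
+ rule := (&OwnershipControlsRule{}).SetObjectOwnership("BucketOwnerEnforced")
+ oc := (&OwnershipControls{}).SetRules([]*OwnershipControlsRule{rule})
+ if err := oc.Validate(); err != nil {
+ return nil, err
+ }
+ return oc, nil
+}
+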
+func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + +// Container for Parquet. +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) GoString() string { + return s.String() +} + +// Container for elements related to a part. +type Part struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. 
+ PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Part) SetChecksumCRC32(v string) *Part { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Part) SetChecksumCRC32C(v string) *Part { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Part) SetChecksumSHA1(v string) *Part { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Part) SetChecksumSHA256(v string) *Part { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Progress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Progress) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Progress) SetBytesProcessed(v int64) *Progress {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Progress) SetBytesReturned(v int64) *Progress {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Progress) SetBytesScanned(v int64) *Progress {
+ s.BytesScanned = &v
+ return s
+}
+
+// This data type contains information about the progress event of an operation.
+type ProgressEvent struct {
+ _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
+
+ // The Progress event details.
+ Details *Progress `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProgressEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProgressEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+ s.Details = v
+ return s
+}
+
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon
+// S3 bucket. You can enable the configuration options in any combination. For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon S3 User Guide.
+type PublicAccessBlockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs)
+ // for this bucket and objects in this bucket. Setting this element to TRUE
+ // causes the following behavior:
+ //
+ // * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is
+ // public.
+ //
+ // * PUT Object calls fail if the request includes a public ACL.
+ //
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should block public bucket policies for this
+ // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
+ // PUT Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
+ BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"`
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
+ // ignore all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs
+ // and doesn't prevent new public ACLs from being set.
+ IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should restrict public bucket policies for this
+ // bucket. Setting this element to TRUE restricts access to this bucket to only
+ // Amazon Web Services service principals and authorized users within this
+ // account if the bucket has a public policy.
+ //
+ // Enabling this setting doesn't affect previously stored bucket policies, except
+ // that public and cross-account access within any public bucket policy, including
+ // non-public delegation to specific accounts, is blocked.
+ RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetBlockPublicAcls sets the BlockPublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicAcls = &v
+ return s
+}
+
+// SetBlockPublicPolicy sets the BlockPublicPolicy field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicPolicy = &v
+ return s
+}
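+
+// exampleBlockAllPublicAccess is an editor's illustrative sketch, not
+// generated code: it enables all four of the guards described above, the
+// combination that blocks all public access to a bucket and its objects.
+func exampleBlockAllPublicAccess() *PublicAccessBlockConfiguration {
+ return (&PublicAccessBlockConfiguration{}).
+ SetBlockPublicAcls(true).
+ SetBlockPublicPolicy(true).
+ SetIgnorePublicAcls(true).
+ SetRestrictPublicBuckets(true)
+}
+
+// SetIgnorePublicAcls sets the IgnorePublicAcls field's value.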
+func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.IgnorePublicAcls = &v
+ return s
+}
+
+// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value.
+func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration {
+ s.RestrictPublicBuckets = &v
+ return s
+}
+
+type PutBucketAccelerateConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
+
+ // Container for setting the transfer acceleration state.
+ //
+ // AccelerateConfiguration is a required field
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The name of the bucket for which the accelerate configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
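+
+// exampleAccelerateInput is an editor's illustrative sketch, not generated
+// code: it assembles the PutBucketAccelerateConfigurationInput above and runs
+// the same client-side Validate check the SDK applies before sending. The
+// empty AccelerateConfiguration literal is a placeholder (its status member
+// is defined elsewhere in this package); Validate below checks only the
+// required Bucket and AccelerateConfiguration members shown above.
+func exampleAccelerateInput(bucket string) error {
+ in := (&PutBucketAccelerateConfigurationInput{}).
+ SetBucket(bucket).
+ SetAccelerateConfiguration(&AccelerateConfiguration{})
+ return in.Validate()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.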
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAccelerateConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketAccelerateConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The bucket to which to apply the ACL.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum
+ // algorithm to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions
+ // and overwrites of those objects.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output.
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAclInput) SetChecksumAlgorithm(v string) *PutBucketAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
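+
+// exampleCannedBucketACL is an editor's illustrative sketch, not generated
+// code: it prepares a PutBucketAclInput using a canned ACL header instead of
+// an explicit AccessControlPolicy, then validates it. "private" is assumed
+// here as a typical BucketCannedACL value; the enum itself is defined
+// elsewhere in this package.
+func exampleCannedBucketACL(bucket string) error {
+ in := (&PutBucketAclInput{}).SetBucket(bucket).SetACL("private")
+ return in.Validate()
+}
+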
+func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
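+
+// exampleAnalyticsInput is an editor's illustrative sketch, not generated
+// code: it fills the required members of the input above (Bucket, Id, and
+// AnalyticsConfiguration). The empty AnalyticsConfiguration literal is a
+// placeholder; its filter and analysis members are defined elsewhere in this
+// package and must be populated before the nested Validate check can pass.
+func exampleAnalyticsInput(bucket, id string) *PutBucketAnalyticsConfigurationInput {
+ return (&PutBucketAnalyticsConfigurationInput{}).
+ SetBucket(bucket).
+ SetId(id).
+ SetAnalyticsConfiguration(&AnalyticsConfiguration{})
+}
+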
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
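+
+// exampleAccessPointBucket is an editor's illustrative sketch, not generated
+// code: as the unexported hasEndpointARN/getEndpointARN helpers above show,
+// the Bucket member may carry an access point ARN rather than a bucket name,
+// which the SDK detects with arn.IsARN. The ARN below is a made-up
+// placeholder in the documented access point format.
+func exampleAccessPointBucket() *PutBucketAnalyticsConfigurationInput {
+ return (&PutBucketAnalyticsConfigurationInput{}).
+ SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/example")
+}
+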
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketCorsInput struct {
+	_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
+
+	// Specifies the bucket impacted by the CORS configuration.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Describes the cross-origin access configuration for objects in an Amazon
+	// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+	// S3 User Guide.
+	//
+	// CORSConfiguration is a required field
+	CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) GoString() string {
+	return s.String()
+}
+
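+// An illustrative sketch, not part of the generated SDK source: putting a
+// CORS configuration on a bucket from client code. The client handle "svc",
+// the bucket name, and the rule values are assumed placeholders; the aws,
+// session, and s3 packages of this SDK are assumed to be imported.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//		Bucket: aws.String("my-bucket"),
+//		CORSConfiguration: &s3.CORSConfiguration{
+//			CORSRules: []*s3.CORSRule{{
+//				AllowedMethods: []*string{aws.String("GET")},
+//				AllowedOrigins: []*string{aws.String("*")},
+//				MaxAgeSeconds:  aws.Int64(3000),
+//			}},
+//		},
+//	})
+//	if err != nil {
+//		// handle the request error
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.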
+func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketCorsInput) SetChecksumAlgorithm(v string) *PutBucketCorsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). 
For information
+	// about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
+	// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Specifies the default server-side-encryption configuration.
+	//
+	// ServerSideEncryptionConfiguration is a required field
+	ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketEncryptionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketEncryptionInput) GoString() string {
+	return s.String()
+}
+
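+// An illustrative sketch, not part of the generated SDK source: enabling
+// SSE-S3 default encryption on a bucket. "svc" is an *s3.S3 client and the
+// bucket name is an assumed placeholder.
+//
+//	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//		Bucket: aws.String("my-bucket"),
+//		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//			Rules: []*s3.ServerSideEncryptionRule{{
+//				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
+//				},
+//			}},
+//		},
+//	})
+
+// Validate inspects the fields of the type to determine if they are valid.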
+func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketEncryptionInput) SetChecksumAlgorithm(v string) *PutBucketEncryptionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
+func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketInventoryConfigurationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+	_ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+	// The name of the bucket for which to set the configuration.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Container for lifecycle rules. You can add as many as 1,000 rules.
+	LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+	return s.String()
+}
+
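+// An illustrative sketch, not part of the generated SDK source: a lifecycle
+// rule that expires objects under an assumed "logs/" prefix after 30 days.
+// "svc" is an *s3.S3 client and all values are placeholders.
+//
+//	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("my-bucket"),
+//		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//			Rules: []*s3.LifecycleRule{{
+//				ID:         aws.String("expire-logs"),
+//				Status:     aws.String("Enabled"),
+//				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//			}},
+//		},
+//	})
+
+// Validate inspects the fields of the type to determine if they are valid.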
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketLifecycleInput struct {
+	_ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Container for lifecycle rules. You can add as many as 1,000 rules.
+	LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleInput) GoString() string {
+	return s.String()
+}
+
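+// An illustrative sketch, not part of the generated SDK source:
+// PutBucketLifecycleInput belongs to the older lifecycle API; like any
+// generated input type it can be validated client-side before sending.
+//
+//	input := &s3.PutBucketLifecycleInput{}
+//	if err := input.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing required Bucket field
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.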
+func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. 
+	//
+	// BucketLoggingStatus is a required field
+	BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.BucketLoggingStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+	}
+	if s.BucketLoggingStatus != nil {
+		if err := s.BucketLoggingStatus.Validate(); err != nil {
+			invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
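+// An illustrative sketch, not part of the generated SDK source: enabling
+// server access logging with an assumed target bucket and prefix. "svc" is
+// an *s3.S3 client.
+//
+//	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//		Bucket: aws.String("my-bucket"),
+//		BucketLoggingStatus: &s3.BucketLoggingStatus{
+//			LoggingEnabled: &s3.LoggingEnabled{
+//				TargetBucket: aws.String("my-log-bucket"),
+//				TargetPrefix: aws.String("access-logs/"),
+//			},
+//		},
+//	})
+
+// SetBucket sets the Bucket field's value.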
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLoggingInput) SetChecksumAlgorithm(v string) *PutBucketLoggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True + // or false value. + SkipDestinationValidation *bool `location:"header" locationName:"x-amz-skip-destination-validation" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// SetSkipDestinationValidation sets the SkipDestinationValidation field's value. +func (s *PutBucketNotificationConfigurationInput) SetSkipDestinationValidation(v bool) *PutBucketNotificationConfigurationInput { + s.SkipDestinationValidation = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketNotificationInput struct {
+	_ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
+
+	// The name of the bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The container for the configuration.
+	//
+	// NotificationConfiguration is a required field
+	NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketNotificationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketNotificationInput) GoString() string {
+	return s.String()
+}
+
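+// An illustrative sketch, not part of the generated SDK source: the generated
+// setters return the receiver, so an input can be built fluently. The values
+// are assumed placeholders.
+//
+//	input := new(s3.PutBucketNotificationInput).
+//		SetBucket("my-bucket").
+//		SetNotificationConfiguration(&s3.NotificationConfigurationDeprecated{})
+//	if err := input.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.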
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketNotificationInput) SetChecksumAlgorithm(v string) *PutBucketNotificationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // that you want to apply to this Amazon S3 bucket. + // + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
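+//
+// A sketch of the surrounding mechanics (hypothetical ARN): when the Bucket
+// member holds an ARN rather than a plain bucket name, hasEndpointARN reports
+// true, and this method later backfills the parsed resource name into a copy
+// of the input:
+//
+//	input := &s3.PutBucketOwnershipControlsInput{}
+//	input.SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap")
+//	// arn.IsARN(*input.Bucket) is now true, so the SDK resolves the endpoint
+//	// from the ARN and rewrites Bucket in a copy of the input, not in input.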
+func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type PutBucketOwnershipControlsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketOwnershipControlsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketOwnershipControlsOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketPolicyInput struct {
+	_ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
+
+	// The name of the bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// Set this parameter to true to confirm that you want to remove your permissions
+	// to change this bucket policy in the future.
+	ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The bucket policy as a JSON document.
+	//
+	// Policy is a required field
+	Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
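+//
+// A logging sketch (hypothetical values): because String is backed by
+// awsutil.Prettify, a populated input can be handed straight to the fmt
+// package:
+//
+//	input := &s3.PutBucketPolicyInput{}
+//	input.SetBucket("example-bucket")
+//	input.SetPolicy(`{"Version":"2012-10-17","Statement":[]}`)
+//	fmt.Println(input) // prints a prettified, multi-line representation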
+func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketPolicyInput) SetChecksumAlgorithm(v string) *PutBucketPolicyInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
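+//
+// Sketch: GoString makes the type satisfy fmt.GoStringer, so the %#v verb
+// yields the same prettified output as String:
+//
+//	out := &s3.PutBucketPolicyOutput{}
+//	fmt.Printf("%#v\n", out)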
+func (s PutBucketPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketReplicationInput struct {
+	_ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
+
+	// The name of the bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// A container for replication rules. You can add up to 1,000 rules. The maximum
+	// size of a replication configuration is 2 MB.
+	//
+	// ReplicationConfiguration is a required field
+	ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// A token to allow Object Lock to be enabled for an existing bucket.
+	Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
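+//
+// A construction sketch (hypothetical names; the nested configuration must
+// itself be valid, since Validate recurses into it):
+//
+//	input := &s3.PutBucketReplicationInput{}
+//	input.SetBucket("example-source-bucket")
+//	input.SetReplicationConfiguration(&s3.ReplicationConfiguration{
+//		// ReplicationConfiguration.Validate requires Role and Rules.
+//	})
+//	err := input.Validate() // non-nil here: the nested configuration is empty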
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketReplicationInput) SetChecksumAlgorithm(v string) *PutBucketReplicationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketReplicationOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketRequestPaymentInput struct {
+	_ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
+
+	// The bucket name.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Container for Payer.
+	//
+	// RequestPaymentConfiguration is a required field
+	RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketRequestPaymentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketRequestPaymentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
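+//
+// A construction sketch (hypothetical bucket; "Requester" is shown as an
+// example value for the nested configuration's required Payer member):
+//
+//	input := &s3.PutBucketRequestPaymentInput{}
+//	input.SetBucket("example-bucket")
+//	input.SetRequestPaymentConfiguration(&s3.RequestPaymentConfiguration{
+//		Payer: aws.String("Requester"),
+//	})
+//	if err := input.Validate(); err != nil {
+//		// handle request.ErrInvalidParams
+//	}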
+func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketRequestPaymentInput) SetChecksumAlgorithm(v string) *PutBucketRequestPaymentInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name. 
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Container for the TagSet and Tag elements.
+	//
+	// Tagging is a required field
+	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketTaggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketTaggingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketTaggingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Tagging == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tagging"))
+	}
+	if s.Tagging != nil {
+		if err := s.Tagging.Validate(); err != nil {
+			invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
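+//
+// Each setter returns the receiver, so calls chain naturally (hypothetical
+// values; every Tag needs both Key and Value):
+//
+//	input := new(s3.PutBucketTaggingInput).
+//		SetBucket("example-bucket").
+//		SetTagging(&s3.Tagging{
+//			TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//		})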
+func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketTaggingInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutBucketTaggingInput) SetChecksumAlgorithm(v string) *PutBucketTaggingInput {
+	s.ChecksumAlgorithm = &v
+	return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
+	s.Tagging = v
+	return s
+}
+
+func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketTaggingInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type PutBucketTaggingOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketTaggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketTaggingOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketVersioningInput struct {
+	_ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
+
+	// The bucket name.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Container for setting the versioning state.
+	//
+	// VersioningConfiguration is a required field
+	VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.VersioningConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketVersioningInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutBucketVersioningInput) SetChecksumAlgorithm(v string) *PutBucketVersioningInput {
+	s.ChecksumAlgorithm = &v
+	return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+	s.MFA = &v
+	return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
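+//
+// A sketch enabling versioning (hypothetical bucket; BucketVersioningStatusEnabled
+// is the package constant for the "Enabled" state):
+//
+//	input := &s3.PutBucketVersioningInput{}
+//	input.SetBucket("example-bucket")
+//	input.SetVersioningConfiguration(&s3.VersioningConfiguration{
+//		Status: aws.String(s3.BucketVersioningStatusEnabled),
+//	})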
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+	s.VersioningConfiguration = v
+	return s
+}
+
+func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketVersioningInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type PutBucketVersioningOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningOutput) GoString() string {
+	return s.String()
+}
+
+type PutBucketWebsiteInput struct {
+	_ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
+
+	// The bucket name.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketWebsiteInput) SetChecksumAlgorithm(v string) *PutBucketWebsiteInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
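+//
+// A conceptual sketch (package-internal, since the method is unexported): the
+// value receiver means the returned pointer refers to a copy, so the caller's
+// input keeps its original Bucket:
+//
+//	modified, err := input.updateArnableField("example-bucket")
+//	// on success, modified.(*PutBucketWebsiteInput).Bucket holds the new name
+//	// while input.Bucket is untouched.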
+func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type PutBucketWebsiteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteOutput) GoString() string {
+	return s.String()
+}
+
+type PutObjectAclInput struct {
+	_ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+	// The canned ACL to apply to the object. For more information, see Canned ACL
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	// Contains the elements that set the ACL permissions for an object per grantee.
+	AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The bucket name that contains the object to which you want to attach the
+	// ACL.
+	//
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key for which the PUT action was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectAclInput) SetChecksumAlgorithm(v string) *PutObjectAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. 
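+//
+// An end-to-end sketch (hypothetical session, bucket, and key) granting a
+// canned ACL on an existing object:
+//
+//	svc := s3.New(sess) // sess is a previously constructed *session.Session
+//	input := &s3.PutObjectAclInput{}
+//	input.SetBucket("example-bucket")
+//	input.SetKey("path/to/object.txt")
+//	input.SetACL(s3.ObjectCannedACLPublicRead)
+//	_, err := svc.PutObjectAcl(input)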
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The bucket name to which the PUT action was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name. For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+	// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
+	// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
+	// SSE-KMS.
+	//
+	// Specifying this header with a PUT action doesn’t affect bucket-level settings
+	// for S3 Bucket Key.
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Can be used to specify caching behavior along the request/reply chain. For
+	// more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The date and time at which the object is no longer cacheable. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT action was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether a legal hold will be applied to this object. For more information
+ // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode that you want to apply to this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want this object's Object Lock to expire. Must
+ // be formatted as a timestamp parameter.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the Amazon Web Services KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ //
+ // SSEKMSEncryptionContext is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectInput's
+ // String and GoString methods.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used
+ // for the object. If you specify x-amz-server-side-encryption:aws:kms, but
+ // do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses
+ // the Amazon Web Services managed key to protect the data. If the KMS key does
+ // not exist in the same account issuing the command, you must use the full
+ // ARN and not just the ID.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 User Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectInput) SetChecksumAlgorithm(v string) *PutObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectInput) SetChecksumCRC32(v string) *PutObjectInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectInput) SetChecksumCRC32C(v string) *PutObjectInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. 
+func (s *PutObjectInput) SetChecksumSHA1(v string) *PutObjectInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectInput) SetChecksumSHA256(v string) *PutObjectInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *PutObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutObjectInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
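+// Editor's note: an illustrative sketch (not generated code) of a minimal
+// PutObject call using the setters above. The client value svc, the reader,
+// and the names are assumptions for illustration:
+//
+//    import (
+//        "strings"
+//
+//        "github.com/aws/aws-sdk-go/aws/session"
+//        "github.com/aws/aws-sdk-go/service/s3"
+//    )
+//
+//    func putObjectExample() error {
+//        svc := s3.New(session.Must(session.NewSession()))
+//        input := new(s3.PutObjectInput).
+//            SetBucket("example-bucket").
+//            SetKey("example-key").
+//            SetBody(strings.NewReader("hello world")).
+//            SetContentType("text/plain")
+//        _, err := svc.PutObject(input)
+//        return err
+//    }
+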
+type PutObjectLegalHoldInput struct {
+ _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
+
+ // The bucket name containing the object that you want to place a legal hold
+ // on.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatic computation of the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key name for the object that you want to place a legal hold on.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Container element for the legal hold configuration you want to apply to the
+ // specified object.
+ LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The version ID of the object that you want to place a legal hold on.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutObjectLegalHoldInput) SetChecksumAlgorithm(v string) *PutObjectLegalHoldInput {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetLegalHold sets the LegalHold field's value.
+func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput {
+ s.LegalHold = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput {
+ s.VersionId = &v
+ return s
+}
+
+func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectLegalHoldInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
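+// Editor's note: an illustrative sketch (not generated code) showing how the
+// input above might be used to place a legal hold on an object. The client
+// value svc and the names are assumptions; imports follow the earlier sketches,
+// plus "github.com/aws/aws-sdk-go/aws" for aws.String:
+//
+//    input := new(s3.PutObjectLegalHoldInput).
+//        SetBucket("example-bucket").
+//        SetKey("example-key").
+//        SetLegalHold(&s3.ObjectLockLegalHold{
+//            Status: aws.String(s3.ObjectLockLegalHoldStatusOn),
+//        })
+//    _, err := svc.PutObjectLegalHold(input)
+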
+type PutObjectLegalHoldOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLegalHoldOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectLockConfigurationInput struct {
+ _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`
+
+ // The bucket whose Object Lock configuration you want to create or replace.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatic computation of the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The Object Lock configuration that you want to apply to the specified bucket.
+ ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLockConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLockConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLockConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectLockConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutObjectLockConfigurationInput) SetChecksumAlgorithm(v string) *PutObjectLockConfigurationInput {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value.
+func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput {
+ s.ObjectLockConfiguration = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetToken sets the Token field's value.
+func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput {
+ s.Token = &v
+ return s
+}
+
+func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
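+// Editor's note: an illustrative sketch (not generated code) of enabling a
+// default 30-day GOVERNANCE retention rule on a bucket with the input above.
+// The client value svc and the names are assumptions; imports follow the
+// earlier sketches:
+//
+//    input := new(s3.PutObjectLockConfigurationInput).
+//        SetBucket("example-bucket").
+//        SetObjectLockConfiguration(&s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
+//                    Days: aws.Int64(30),
+//                },
+//            },
+//        })
+//    _, err := svc.PutObjectLockConfiguration(input)
+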
+type PutObjectLockConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLockConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectLockConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the uploaded object uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+ // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only
+ // be present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL-encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an Amazon Web Services + // KMS key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectOutput) SetChecksumCRC32(v string) *PutObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectOutput) SetChecksumCRC32C(v string) *PutObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectOutput) SetChecksumSHA1(v string) *PutObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectOutput) SetChecksumSHA256(v string) *PutObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
+ // For more information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates whether this action should bypass Governance-mode restrictions.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatic computation of the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key name for the object that you want to apply this Object Retention
+ // configuration to.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The container element for the Object Retention configuration.
+ Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The version ID for the object that you want to apply this Object Retention
+ // configuration to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectRetentionInput) SetChecksumAlgorithm(v string) *PutObjectRetentionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutObjectRetentionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectRetentionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectRetentionOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput {
+ s.RequestCharged = &v
+ return s
+}
+
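+// Editor's note: an illustrative sketch (not generated code) of applying a
+// GOVERNANCE-mode retain-until date to an object with the retention input
+// above. The client value svc and the names are assumptions; imports follow
+// the earlier sketches, plus "time":
+//
+//    input := new(s3.PutObjectRetentionInput).
+//        SetBucket("example-bucket").
+//        SetKey("example-key").
+//        SetRetention(&s3.ObjectLockRetention{
+//            Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
+//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//        })
+//    _, err := svc.PutObjectRetention(input)
+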
+type PutObjectTaggingInput struct {
+ _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
+
+ // The bucket name containing the object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ // you provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatic computation of the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Name of the object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Container for the TagSet and Tag elements
+ //
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The versionId of the object that the tag-set will be added to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectTaggingInput) SetChecksumAlgorithm(v string) *PutObjectTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutObjectTaggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingOutput) GoString() string {
+	return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+	s.VersionId = &v
+	return s
+}
+
+type PutPublicAccessBlockInput struct {
+	_ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+	// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+	// want to set.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The PublicAccessBlock configuration that you want to apply to this Amazon
+	// S3 bucket. You can enable the configuration options in any combination. For
+	// more information about when Amazon S3 considers a bucket or object public,
+	// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+	// in the Amazon S3 User Guide.
+	//
+	// PublicAccessBlockConfiguration is a required field
+	PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
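+
+// Illustrative caller-side sketch (not part of the generated API): blocking
+// all forms of public access on a bucket; svc is assumed to be an *s3.S3
+// client created elsewhere.
+//
+//	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//		Bucket: aws.String("my-bucket"),
+//		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       aws.Bool(true),
+//			IgnorePublicAcls:      aws.Bool(true),
+//			BlockPublicPolicy:     aws.Bool(true),
+//			RestrictPublicBuckets: aws.Bool(true),
+//		},
+//	})
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".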
+func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutPublicAccessBlockInput) SetChecksumAlgorithm(v string) *PutPublicAccessBlockInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockOutput) GoString() string {
+	return s.String()
+}
+
+// Specifies the configuration for publishing messages to an Amazon Simple Queue
+// Service (Amazon SQS) queue when Amazon S3 detects specified events.
+type QueueConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// A collection of bucket events for which to send notifications.
+	//
+	// Events is a required field
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"`
+
+	// Specifies object key name filtering rules. For information about key name
+	// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon S3 User Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
+
+	// An optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+	// publishes a message when it detects events of the specified type.
+	//
+	// QueueArn is a required field
+	QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s QueueConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s QueueConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+	if s.Events == nil {
+		invalidParams.Add(request.NewErrParamRequired("Events"))
+	}
+	if s.QueueArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+	s.Events = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+	s.QueueArn = &v
+	return s
+}
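+
+// Illustrative sketch (not part of the generated API): routing
+// s3:ObjectCreated:* events for a bucket to an SQS queue; the queue ARN is
+// a placeholder value.
+//
+//	qc := &s3.QueueConfiguration{}
+//	qc.SetQueueArn("arn:aws:sqs:us-east-1:123456789012:my-queue")
+//	qc.SetEvents([]*string{aws.String(s3.EventS3ObjectCreated)})
+
+// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html)
+// for the same purposes. This data type specifies the configuration for publishing
+// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon
+// S3 detects specified events.
+type QueueConfigurationDeprecated struct {
+	_ struct{} `type:"structure"`
+
+	// The bucket event for which to send notifications.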
+	//
+	// Deprecated: Event has been deprecated
+	Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+	// A collection of bucket events for which to send notifications.
+	Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"`
+
+	// An optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+	// publishes a message when it detects events of the specified type.
+	Queue *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s QueueConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s QueueConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+	s.Event = &v
+	return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+	s.Events = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
+	return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+// The container for the records event.
+type RecordsEvent struct {
+	_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+	// The byte array containing one or more (possibly partial) result records.
+	// Payload is automatically base64 encoded/decoded by the SDK.
+	Payload []byte `type:"blob"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) GoString() string {
+	return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+	s.Payload = v
+	return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
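+
+// Illustrative caller-side sketch (not part of the generated API): draining
+// Records events from a SelectObjectContent response; out is assumed to be
+// the *s3.SelectObjectContentOutput returned by svc.SelectObjectContent.
+//
+//	defer out.EventStream.Close()
+//	for event := range out.EventStream.Events() {
+//		if records, ok := event.(*s3.RecordsEvent); ok {
+//			fmt.Printf("%s", records.Payload) // raw, possibly partial records
+//		}
+//	}
+//	if err := out.EventStream.Err(); err != nil {
+//		// handle a streaming error here
+//	}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.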
+func (s *RecordsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	s.Payload = make([]byte, len(msg.Payload))
+	copy(s.Payload, msg.Payload)
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
+	msg.Payload = s.Payload
+	return msg, err
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use when redirecting requests. The default is the protocol that
+	// is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Redirect) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Redirect) GoString() string {
+	return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+	s.HostName = &v
+	return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+	s.HttpRedirectCode = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+	s.Protocol = &v
+	return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+	s.ReplaceKeyPrefixWith = &v
+	return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+	s.ReplaceKeyWith = &v
+	return s
+}
+
+// Specifies the redirect behavior of all requests to a website endpoint of
+// an Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the host where requests are redirected.
+	//
+	// HostName is a required field
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use when redirecting requests. The default is the protocol that
+	// is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RedirectAllRequestsTo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RedirectAllRequestsTo) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+	if s.HostName == nil {
+		invalidParams.Add(request.NewErrParamRequired("HostName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+	s.HostName = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+	s.Protocol = &v
+	return s
+}
+
+// A filter that you can specify to select modifications on replicas. Amazon
+// S3 doesn't replicate replica modifications by default. In the latest version
+// of replication configuration (when Filter is specified), you can specify
+// this element and set the status to Enabled to replicate modifications on
+// replicas.
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
+type ReplicaModifications struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether Amazon S3 replicates modifications on replicas.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"`
+}
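+
+// Illustrative caller-side sketch (not part of the generated API):
+// redirecting every website request for a bucket to another host over
+// HTTPS; svc is assumed to be an *s3.S3 client created elsewhere.
+//
+//	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//		Bucket: aws.String("my-bucket"),
+//		WebsiteConfiguration: &s3.WebsiteConfiguration{
+//			RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
+//				HostName: aws.String("example.com"),
+//				Protocol: aws.String(s3.ProtocolHttps),
+//			},
+//		},
+//	})
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".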
+func (s ReplicaModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaModifications) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaModifications) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications { + s.Status = &v + return s +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) + // role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon S3 User Guide. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. 
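+//
+// Illustrative sketch (not part of the generated API): a minimal replication
+// configuration with a single rule; the role and bucket ARNs are placeholder
+// values.
+//
+//	cfg := &s3.ReplicationConfiguration{}
+//	cfg.SetRole("arn:aws:iam::123456789012:role/replication-role")
+//	cfg.SetRules([]*s3.ReplicationRule{{
+//		Status:      aws.String(s3.ReplicationRuleStatusEnabled),
+//		Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//	}})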
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+	s.Rules = v
+	return s
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+	// in your replication configuration, you must also include a DeleteMarkerReplication
+	// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+	// Status must be set to Disabled, because Amazon S3 does not support replicating
+	// delete markers for tag-based rules. For an example configuration, see Basic
+	// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+	//
+	// For more information about delete marker replication, see Basic Rule Configuration
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+	//
+	// If you are using an earlier version of the replication configuration, Amazon
+	// S3 handles replication of delete markers differently. For more information,
+	// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+	DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
+
+	// A container for information about the replication destination and its configurations
+	// including enabling the S3 Replication Time Control (S3 RTC).
+	//
+	// Destination is a required field
+	Destination *Destination `type:"structure" required:"true"`
+
+	// Optional configuration to replicate existing source bucket objects. For more
+	// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+	// in the Amazon S3 User Guide.
+	ExistingObjectReplication *ExistingObjectReplication `type:"structure"`
+
+	// A filter that identifies the subset of objects to which the replication rule
+	// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+	Filter *ReplicationRuleFilter `type:"structure"`
+
+	// A unique identifier for the rule. The maximum length is 255 characters.
+	ID *string `type:"string"`
+
+	// An object key name prefix that identifies the object or objects to which
+	// the rule applies. The maximum prefix length is 1,024 characters. To include
+	// all objects in a bucket, specify an empty string.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	//
+	// Deprecated: Prefix has been deprecated
+	Prefix *string `deprecated:"true" type:"string"`
+
+	// The priority indicates which rule has precedence whenever two or more replication
+	// rules conflict. Amazon S3 will attempt to replicate objects according to
+	// all replication rules. However, if there are two or more rules with the same
+	// destination bucket, then objects will be replicated according to the rule
+	// with the highest priority. The higher the number, the higher the priority.
+	//
+	// For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+	// in the Amazon S3 User Guide.
+ Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer managed key stored in Amazon Web Services Key Management Service + // (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag. +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. 
+ And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid value: 15 + Minutes *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. 
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
+	s.Payer = &v
+	return s
+}
+
+// Container for specifying if periodic QueryProgress messages should be sent.
+type RequestProgress struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether periodic QueryProgress frames should be sent. Valid values:
+	// TRUE, FALSE. Default value: FALSE.
+	Enabled *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RequestProgress) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RequestProgress) GoString() string {
+	return s.String()
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
+	s.Enabled = &v
+	return s
+}
+
+type RestoreObjectInput struct {
+	_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
+
+	// The bucket name containing the object to restore.
+	//
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When using this action with Amazon S3 on Outposts, you must direct requests
+	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name. For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+	// the HTTP status code 400 Bad Request. For more information, see Checking
+	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Object key for which the action was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Container for restore job parameters.
+	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.RestoreRequest != nil {
+		if err := s.RestoreRequest.Validate(); err != nil {
+			invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *RestoreObjectInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *RestoreObjectInput) SetChecksumAlgorithm(v string) *RestoreObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. 
Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. 
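+//
+// Illustrative caller-side sketch (not part of the generated API): a restore
+// request that keeps the restored copy active for ten days at the Standard
+// retrieval tier; svc is assumed to be an *s3.S3 client created elsewhere.
+//
+//	req := &s3.RestoreRequest{}
+//	req.SetDays(10)
+//	req.SetGlacierJobParameters(&s3.GlacierJobParameters{Tier: aws.String(s3.TierStandard)})
+//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//		Bucket:         aws.String("my-bucket"),
+//		Key:            aws.String("archived-key"),
+//		RestoreRequest: req,
+//	})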
+func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon S3 API Reference. For examples, see Put Bucket Lifecycle Configuration +// Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object. 
+ Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, + // GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled + // (or versioning is suspended), you can set this action to request that Amazon + // S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, + // INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at + // a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon S3 User Guide. + Transition *Transition `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. 
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the Amazon Web Services Key Management Service (Amazon + // Web Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. + // + // KeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SSEKMS's + // String and GoString methods. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SSES3) GoString() string {
+ return s.String()
+}
+
+// Specifies the byte range of the object to get the records from. A record
+// is processed when its first byte is contained by the range. This parameter
+// is optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 about how to specify the start and end of the range.
+type ScanRange struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the end of the byte range. This parameter is optional. Valid values:
+ // non-negative integers. The default value is one less than the size of the
+ // object being queried. If only the End parameter is supplied, it is interpreted
+ // to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange>
+ // means scan the last 50 bytes.
+ End *int64 `type:"long"`
+
+ // Specifies the start of the byte range. This parameter is optional. Valid
+ // values: non-negative integers. The default value is 0. If only start is supplied,
+ // it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange>
+ // means scan from byte 50 until the end of the file.
+ Start *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ScanRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ScanRange) GoString() string {
+ return s.String()
+}
+
+// SetEnd sets the End field's value.
+func (s *ScanRange) SetEnd(v int64) *ScanRange {
+ s.End = &v
+ return s
+}
+
+// SetStart sets the Start field's value.
+func (s *ScanRange) SetStart(v int64) *ScanRange {
+ s.Start = &v
+ return s
+}
+
+// SelectObjectContentEventStreamEvent groups together all EventStream
+// event writes for SelectObjectContentEventStream.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStreamEvent interface {
+ eventSelectObjectContentEventStream()
+ eventstreamapi.Marshaler
+ eventstreamapi.Unmarshaler
+}
+
+// SelectObjectContentEventStreamReader provides the interface for reading events from the stream. The
+// default implementation for this interface will be readSelectObjectContentEventStream.
+//
+// The reader's Close method must allow multiple concurrent calls.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+// * SelectObjectContentEventStreamUnknownEvent
+type SelectObjectContentEventStreamReader interface {
+ // Returns a channel of events as they are read from the event stream.
+ Events() <-chan SelectObjectContentEventStreamEvent
+
+ // Close will stop the reader reading events from the stream.
+ Close() error
+
+ // Returns any error that has occurred while reading from the event stream.
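+ //
+ // A typical consumer drains Events() and checks Err() afterwards; a
+ // minimal sketch (stream stands for any SelectObjectContentEventStreamReader):
+ //
+ //    for ev := range stream.Events() {
+ //        if records, ok := ev.(*RecordsEvent); ok {
+ //            _ = records.Payload // raw query results
+ //        }
+ //    }
+ //    if err := stream.Err(); err != nil {
+ //        // the stream ended abnormally
+ //    }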
+ Err() error
+}
+
+type readSelectObjectContentEventStream struct {
+ eventReader *eventstreamapi.EventReader
+ stream chan SelectObjectContentEventStreamEvent
+ err *eventstreamapi.OnceError
+
+ done chan struct{}
+ closeOnce sync.Once
+}
+
+func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
+ r := &readSelectObjectContentEventStream{
+ eventReader: eventReader,
+ stream: make(chan SelectObjectContentEventStreamEvent),
+ done: make(chan struct{}),
+ err: eventstreamapi.NewOnceError(),
+ }
+ go r.readEventStream()
+
+ return r
+}
+
+// Close will close the underlying event stream reader.
+func (r *readSelectObjectContentEventStream) Close() error {
+ r.closeOnce.Do(r.safeClose)
+ return r.Err()
+}
+
+func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
+ return r.err.ErrorSet()
+}
+
+func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
+ return r.done
+}
+
+func (r *readSelectObjectContentEventStream) safeClose() {
+ close(r.done)
+}
+
+func (r *readSelectObjectContentEventStream) Err() error {
+ return r.err.Err()
+}
+
+func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+ return r.stream
+}
+
+func (r *readSelectObjectContentEventStream) readEventStream() {
+ defer r.Close()
+ defer close(r.stream)
+
+ for {
+ event, err := r.eventReader.ReadEvent()
+ if err != nil {
+ if err == io.EOF {
+ return
+ }
+ select {
+ case <-r.done:
+ // If closed already ignore the error
+ return
+ default:
+ }
+ if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok {
+ continue
+ }
+ r.err.SetError(err)
+ return
+ }
+
+ select {
+ case r.stream <- event.(SelectObjectContentEventStreamEvent):
+ case <-r.done:
+ return
+ }
+ }
+}
+
+type unmarshalerForSelectObjectContentEventStreamEvent struct {
+ metadata protocol.ResponseMetadata
+}
+
+func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
+ switch eventType {
+ case "Cont":
+ return &ContinuationEvent{}, nil
+ case "End":
+ return &EndEvent{}, nil
+ case "Progress":
+ return &ProgressEvent{}, nil
+ case "Records":
+ return &RecordsEvent{}, nil
+ case "Stats":
+ return &StatsEvent{}, nil
+ default:
+ return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil
+ }
+}
+
+// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the
+// SelectObjectContentEventStream group of events when an unknown event is received.
+type SelectObjectContentEventStreamUnknownEvent struct {
+ Type string
+ Message eventstream.Message
+}
+
+// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream
+// group of events.
+func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
+ msg eventstream.Message, err error,
+) {
+ return e.Message.Clone(), nil
+}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamUnknownEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SelectObjectContentInput's + // String and GoString methods. 
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter
+ // is needed only when the object was created using a checksum algorithm. For
+ // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the byte range of the object to get the records from. A record
+ // is processed when its first byte is contained by the range. This parameter
+ // is optional, but when specified, it must not be empty. See RFC 2616, Section
+ // 14.35.1 about how to specify the start and end of the range.
+ //
+ // ScanRange may be used in the following ways:
+ //
+ // * <scanrange><start>50</start><end>100</end></scanrange> - process only
+ // the records starting between the bytes 50 and 100 (inclusive, counting
+ // from zero)
+ //
+ // * <scanrange><start>50</start></scanrange> - process only the records
+ // starting after the byte 50
+ //
+ // * <scanrange><end>50</end></scanrange> - process only the records within
+ // the last 50 bytes of the file.
+ ScanRange *ScanRange `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Expression == nil {
+ invalidParams.Add(request.NewErrParamRequired("Expression"))
+ }
+ if s.ExpressionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+ }
+ if s.InputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.OutputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *SelectObjectContentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
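+//
+// Like the other setters here, this chains; a minimal complete query input,
+// as a sketch (bucket, key, and SQL text are placeholders):
+//
+//    input := (&SelectObjectContentInput{}).
+//        SetBucket("examplebucket").
+//        SetKey("data.csv").
+//        SetExpression("SELECT s._1 FROM S3Object s").
+//        SetExpressionType(ExpressionTypeSql).
+//        SetInputSerialization((&InputSerialization{}).SetCSV(&CSVInput{})).
+//        SetOutputSerialization((&OutputSerialization{}).SetCSV(&CSVOutput{}))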
+func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + EventStream *SelectObjectContentEventStream +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. 
+func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. If you don't specify a customer +// managed key at configuration, Amazon S3 automatically creates an Amazon Web +// Services KMS key in your Amazon Web Services account the first time that +// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon +// S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if + // and only if SSEAlgorithm is set to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. + // However, if you are using encryption with cross-account or Amazon Web Services + // service operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For + // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSMasterKeyID is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's + // String and GoString methods. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
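+//
+// For illustration, a default-encryption configuration that passes this
+// validation (the KMS key ARN is the placeholder from the example above):
+//
+//    def := (&ServerSideEncryptionByDefault{}).
+//        SetSSEAlgorithm(ServerSideEncryptionAwsKms).
+//        SetKMSMasterKeyID("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab")
+//    cfg := (&ServerSideEncryptionConfiguration{}).SetRules([]*ServerSideEncryptionRule{
+//        (&ServerSideEncryptionRule{}).
+//            SetApplyServerSideEncryptionByDefault(def).
+//            SetBucketKeyEnabled(true),
+//    })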
+func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. 
+ BucketKeyEnabled *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ServerSideEncryptionRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ServerSideEncryptionRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ServerSideEncryptionRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"}
+ if s.ApplyServerSideEncryptionByDefault != nil {
+ if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil {
+ invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value.
+func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule {
+ s.ApplyServerSideEncryptionByDefault = v
+ return s
+}
+
+// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
+func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule {
+ s.BucketKeyEnabled = &v
+ return s
+}
+
+// A container that describes additional filters for identifying the source
+// objects that you want to replicate. You can choose to enable or disable the
+// replication of these objects. Currently, Amazon S3 supports only the filter
+// that you can specify for objects created with server-side encryption using
+// a customer managed key stored in Amazon Web Services Key Management Service
+// (SSE-KMS).
+type SourceSelectionCriteria struct {
+ _ struct{} `type:"structure"`
+
+ // A filter that you can specify for selections for modifications on replicas.
+ // Amazon S3 doesn't replicate replica modifications by default. In the latest
+ // version of replication configuration (when Filter is specified), you can
+ // specify this element and set the status to Enabled to replicate modifications
+ // on replicas.
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, this element
+ // is not allowed.
+ ReplicaModifications *ReplicaModifications `type:"structure"`
+
+ // A container for filter information for the selection of Amazon S3 objects
+ // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
+ // in the replication configuration, this element is required.
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SourceSelectionCriteria) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicaModifications sets the ReplicaModifications field's value. +func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an Amazon Web Services KMS key stored in Amazon Web Services Key Management + // Service. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// Container for the stats details. +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The total number of object bytes scanned. 
+ BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+ s.BytesScanned = &v
+ return s
+}
+
+// Container for the Stats Event.
+type StatsEvent struct {
+ _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+ // The Stats event details.
+ Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+ s.Details = v
+ return s
+}
+
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.
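+//
+// A minimal sketch of an analysis that exports V_1 data as CSV to another
+// bucket (the destination bucket ARN is a placeholder):
+//
+//    sca := (&StorageClassAnalysis{}).SetDataExport(
+//        (&StorageClassAnalysisDataExport{}).
+//            SetOutputSchemaVersion(StorageClassAnalysisSchemaVersionV1).
+//            SetDestination((&AnalyticsExportDestination{}).SetS3BucketDestination(
+//                (&AnalyticsS3BucketDestination{}).
+//                    SetBucket("arn:aws:s3:::reports-bucket").
+//                    SetFormat(AnalyticsS3ExportFileFormatCsv))))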
+type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. 
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
+ s.Destination = v
+ return s
+}
+
+// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
+func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
+ s.OutputSchemaVersion = &v
+ return s
+}
+
+// A container for a key-value pair.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the object key.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Value of the tag.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+// Container for TagSet elements.
+type Tagging struct {
+ _ struct{} `type:"structure"`
+
+ // A collection for a set of tags.
+ //
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tagging) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tagging) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
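+//
+// For illustration, a tag set that satisfies these checks (key and value are
+// placeholders):
+//
+//    tags := (&Tagging{}).SetTagSet([]*Tag{
+//        (&Tag{}).SetKey("team").SetValue("storage"),
+//    })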
+func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see Permissions server access +// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. 
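+ //
+ // A sketch of a tier that archives objects after 90 consecutive days
+ // without access (the day count is a placeholder within the allowed range):
+ //
+ //    t := (&Tiering{}).
+ //        SetAccessTier(IntelligentTieringAccessTierArchiveAccess).
+ //        SetDays(90)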
+ // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days). + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
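+//
+// A minimal construction sketch for the TopicConfiguration type above,
+// assuming a hypothetical SNS topic ARN and the "aws" helper package; the
+// event name is one of the Event enum string values:
+//
+//	tc := &s3.TopicConfiguration{
+//		Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//		TopicArn: aws.String("arn:aws:sns:us-west-2:123456789012:my-topic"),
+//	}
+//	if err := tc.Validate(); err != nil {
+//		// Events and TopicArn are both required
+//	}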
+func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of events related to objects + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. 
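+//
+// The generated setters return their receiver, so configuration can be
+// chained; a sketch with a hypothetical topic ARN (for new code, prefer the
+// non-deprecated TopicConfiguration):
+//
+//	tcd := (&s3.TopicConfigurationDeprecated{}).
+//		SetId("notify-on-delete").
+//		SetTopic("arn:aws:sns:us-west-2:123456789012:my-topic").
+//		SetEvents([]*string{aws.String("s3:ObjectRemoved:*")})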
+func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // The bucket name. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. 
For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the source object for the copy operation. You specify the value
+ // in one of two formats, depending on whether you want to access the source
+ // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ //
+ // * For objects not accessed through an access point, specify the name of
+ // the source bucket and key of the source object, separated by a slash (/).
+ // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket,
+ // use awsexamplebucket/reports/january.pdf. The value must be URL-encoded.
+ //
+ // * For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the
+ // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL-encoded. Amazon S3 supports copy operations using
+ // access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Alternatively, for objects accessed through
+ // Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ // format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL-encoded.
+ //
+ // To copy a specific version of an object, append ?versionId=<version-id>
+ // to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of
+ // the source object.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where the first and last are the zero-based byte
+ // offsets to copy.
For example, bytes=0-9 indicates that you want to copy the
+ // first 10 bytes of the source. You can copy a range only if the source object
+ // is greater than 5 MB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ //
+ // CopySourceSSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartCopyInput's
+ // String and GoString methods.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The account ID of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request fails with the HTTP status
+ // code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The account ID of the expected source bucket owner. If the source bucket
+ // is owned by a different account, the request fails with the HTTP status code
+ // 403 Forbidden (access denied).
+ ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key.
The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. 
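+//
+// A sketch of a typical UploadPartCopyInput, with hypothetical bucket, key,
+// and upload ID values; CopySourceRange selects the slice of the source
+// object that becomes this part:
+//
+//	input := &s3.UploadPartCopyInput{
+//		Bucket:          aws.String("destination-bucket"),
+//		Key:             aws.String("large-object"),
+//		UploadId:        aws.String(uploadID), // from CreateMultipartUpload
+//		PartNumber:      aws.Int64(1),
+//		CopySource:      aws.String("source-bucket/reports/january.pdf"),
+//		CopySourceRange: aws.String("bytes=0-5242879"), // first 5 MiB
+//	}
+//	if err := input.Validate(); err != nil {
+//		// Bucket, Key, UploadId, PartNumber, and CopySource are required
+//	}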
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
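+//
+// A sketch of issuing the copy and keeping the part's ETag for the final
+// CompleteMultipartUpload call, assuming an *s3.S3 client named svc and the
+// hypothetical input shown earlier:
+//
+//	out, err := svc.UploadPartCopy(input)
+//	if err == nil && out.CopyPartResult != nil {
+//		etag := aws.StringValue(out.CopyPartResult.ETag)
+//		_ = etag // record it together with the part number
+//	}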
+func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // This checksum algorithm must be the same for all parts and it must match the
+ // checksum value supplied in the CreateMultipartUpload request.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
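+ //
+ // Relating to the ChecksumAlgorithm and ChecksumCRC32 members above: a sketch
+ // of supplying a precomputed CRC32 value for a part, using only the standard
+ // library (payload is a hypothetical []byte):
+ //
+ //	sum := make([]byte, 4)
+ //	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(payload))
+ //	input.ChecksumAlgorithm = aws.String("CRC32")
+ //	input.ChecksumCRC32 = aws.String(base64.StdEncoding.EncodeToString(sum))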
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *UploadPartInput) SetChecksumAlgorithm(v string) *UploadPartInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *UploadPartInput) SetChecksumCRC32(v string) *UploadPartInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *UploadPartInput) SetChecksumCRC32C(v string) *UploadPartInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartInput) SetChecksumSHA1(v string) *UploadPartInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartInput) SetChecksumSHA256(v string) *UploadPartInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
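+//
+// For SSE-C uploads, every part must be encrypted with the same customer key
+// that was supplied to CreateMultipartUpload. A sketch, with a hypothetical
+// 32-byte key and its base64-encoded MD5 digest:
+//
+//	input.SetSSECustomerAlgorithm("AES256").
+//		SetSSECustomerKey(string(rawKey)). // sent base64-encoded via marshal-as:"blob"
+//		SetSSECustomerKeyMD5(keyMD5Base64)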
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. With multipart uploads, this
+ // may not be a checksum value of the object. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for
+ // the object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
+func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput {
+ s.BucketKeyEnabled = &v
+ return s
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *UploadPartOutput) SetChecksumCRC32(v string) *UploadPartOutput {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
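+//
+// A sketch of recording the response for the later CompleteMultipartUpload
+// call, assuming an *s3.S3 client named svc and the hypothetical
+// UploadPartInput shown earlier:
+//
+//	out, err := svc.UploadPart(input)
+//	if err == nil {
+//		completed := &s3.CompletedPart{
+//			ETag:       out.ETag,
+//			PartNumber: input.PartNumber,
+//		}
+//		_ = completed // append to the CompletedMultipartUpload part list
+//	}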
+func (s *UploadPartOutput) SetChecksumCRC32C(v string) *UploadPartOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartOutput) SetChecksumSHA1(v string) *UploadPartOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartOutput) SetChecksumSHA256(v string) *UploadPartOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon S3 API Reference. +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. 
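+ //
+ // A minimal static-website sketch for the enclosing WebsiteConfiguration,
+ // with hypothetical document names; note that RedirectAllRequestsTo below
+ // is mutually exclusive with the other members:
+ //
+ //	cfg := &s3.WebsiteConfiguration{
+ //		IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+ //		ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+ //	}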
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+type WriteGetObjectResponseInput struct {
+ _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"`
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"`
+
+ // The object data.
+ //
+ // To use a non-seekable io.Reader for this request, wrap the io.Reader with
+ // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable
+ // readers.
This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + Body io.ReadSeeker `type:"blob"` + + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // server-side encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32 checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32C checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 160-bit SHA-1 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 256-bit SHA-256 digest of the object returned by the Object Lambda function. 
+	// This may not match the checksum for the object stored in Amazon S3. Amazon
+	// S3 will perform validation of the checksum values only when the original
+	// GetObject request required checksum validation. For more information about
+	// checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Only one checksum header can be specified at a time. If you supply multiple
+	// checksum headers, this request will fail.
+	ChecksumSHA256 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha256" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"`
+
+	// The size of the content body in bytes.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+	// The portion of the object returned in the response.
+	ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"`
+
+	// Specifies whether an object stored in Amazon S3 is (true) or is not (false)
+	// a delete marker.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"`
+
+	// An opaque identifier assigned by a web server to a specific version of a
+	// resource found at a URL.
+	ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"`
+
+	// A string that uniquely identifies an error condition. Returned in the <Code>
+	// tag of the error XML response for a corresponding GetObject call. Cannot
+	// be used with a successful StatusCode header or when the transformed object
+	// is provided in the body. All error codes from S3 are sentence-cased. The
+	// regular expression (regex) value is "^[A-Z][a-zA-Z]+$".
+	ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"`
+
+	// Contains a generic description of the error condition. Returned in the <Message>
+	// tag of the error XML response for a corresponding GetObject call. Cannot
+	// be used with a successful StatusCode header or when the transformed object
+	// is provided in the body.
+	ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"`
+
+	// If the object expiration is configured (see PUT Bucket lifecycle), the response
+	// includes this header. It includes the expiry-date and rule-id key-value pairs
+	// that provide the object expiration information. The value of the rule-id
+	// is URL-encoded.
+	Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"`
+
+	// The date and time that the object was last modified.
+	LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Set to the number of metadata entries not returned in x-amz-meta headers.
+	// This can happen if you create metadata using an API like SOAP that supports
+	// more flexible metadata than the REST API. For example, using SOAP, you can
+	// create metadata whose values are not legal HTTP headers.
+	MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"`
+
+	// Indicates whether an object stored in Amazon S3 has an active legal hold.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// Indicates whether an object stored in Amazon S3 has Object Lock enabled.
+	// For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+	ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when Object Lock is configured to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The count of parts this object has.
+	PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"`
+
+	// Indicates whether the request involves a bucket that is either a source or
+	// a destination in a replication rule. For more information about S3 Replication,
+	// see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+	ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// Route prefix to the HTTP URL generated.
+	//
+	// RequestRoute is a required field
+	RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"`
+
+	// A single-use encrypted token that maps WriteGetObjectResponse to the end
+	// user GetObject request.
+	//
+	// RequestToken is a required field
+	RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"`
+
+	// Provides information about the object restoration operation and the expiration
+	// time of the restored object copy.
+	Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"`
+
+	// Encryption algorithm used if server-side encryption with a customer-provided
+	// encryption key was specified for the object stored in Amazon S3.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// 128-bit MD5 digest of the customer-provided encryption key used in Amazon
+	// S3 to encrypt data stored in S3.
+	// For more information, see Protecting data using server-side encryption with
+	// customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the Amazon Web Services Key Management Service
+	// (Amazon Web Services KMS) symmetric customer managed key that was used for
+	// the object stored in Amazon S3.
+	//
+	// SSEKMSKeyId is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by WriteGetObjectResponseInput's
+	// String and GoString methods.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing the requested object
+	// in Amazon S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The integer status code for an HTTP response of a corresponding GetObject
+	// request.
+	//
+	// Status Codes
+	//
+	//    * 200 - OK
+	//
+	//    * 206 - Partial Content
+	//
+	//    * 304 - Not Modified
+	//
+	//    * 400 - Bad Request
+	//
+	//    * 401 - Unauthorized
+	//
+	//    * 403 - Forbidden
+	//
+	//    * 404 - Not Found
+	//
+	//    * 405 - Method Not Allowed
+	//
+	//    * 409 - Conflict
+	//
+	//    * 411 - Length Required
+	//
+	//    * 412 - Precondition Failed
+	//
+	//    * 416 - Range Not Satisfiable
+	//
+	//    * 500 - Internal Server Error
+	//
+	//    * 503 - Service Unavailable
+	StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"`
+
+	// Provides storage class information of the object. Amazon S3 returns this
+	// header for all objects except for S3 Standard storage class objects.
+	//
+	// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+	StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The number of tags, if any, on the object.
+	TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"`
+
+	// An ID used to reference a specific version of the object.
+	VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WriteGetObjectResponseInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WriteGetObjectResponseInput) GoString() string {
+	return s.String()
+}
+
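+// Example (an illustrative sketch, not part of the generated API): from a
+// consumer's perspective, an S3 Object Lambda handler could assemble a response
+// with the fluent setters below and send it with the client's
+// WriteGetObjectResponse operation. The svc client, the transformed object
+// bytes (body), and the route/token values (normally taken from the Lambda
+// event) are assumed here:
+//
+//	// The checksum headers carry the base64 encoding of the big-endian digest.
+//	sum := crc32.ChecksumIEEE(body) // hash/crc32
+//	buf := make([]byte, 4)
+//	binary.BigEndian.PutUint32(buf, sum) // encoding/binary
+//
+//	input := (&s3.WriteGetObjectResponseInput{}).
+//		SetRequestRoute(route).
+//		SetRequestToken(token).
+//		SetStatusCode(200).
+//		SetChecksumCRC32(base64.StdEncoding.EncodeToString(buf)).
+//		SetBody(bytes.NewReader(body)) // or aws.ReadSeekCloser(r) for a non-seekable io.Reader
+//	_, err := svc.WriteGetObjectResponse(input)
+
+// Validate inspects the fields of the type to determine if they are valid.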
+func (s *WriteGetObjectResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} + if s.RequestRoute == nil { + invalidParams.Add(request.NewErrParamRequired("RequestRoute")) + } + if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) + } + if s.RequestToken == nil { + invalidParams.Add(request.NewErrParamRequired("RequestToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. 
+func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { + s.ETag = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { + s.ErrorMessage = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { + s.RequestCharged = &v + return s +} + +// SetRequestRoute sets the RequestRoute field's value. +func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { + s.RequestRoute = &v + return s +} + +// SetRequestToken sets the RequestToken field's value. +func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { + s.RequestToken = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { + s.ServerSideEncryption = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { + s.StatusCode = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { + s.VersionId = &v + return s +} + +func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { + return map[string]string{ + "RequestRoute": aws.StringValue(s.RequestRoute), + } +} + +type WriteGetObjectResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s WriteGetObjectResponseOutput) GoString() string { + return s.String() +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" + + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintCnNorthwest1 is 
a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" +) + +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + 
		BucketLogsPermissionRead,
+		BucketLogsPermissionWrite,
+	}
+}
+
+const (
+	// BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+	BucketVersioningStatusEnabled = "Enabled"
+
+	// BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+	BucketVersioningStatusSuspended = "Suspended"
+)
+
+// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum
+func BucketVersioningStatus_Values() []string {
+	return []string{
+		BucketVersioningStatusEnabled,
+		BucketVersioningStatusSuspended,
+	}
+}
+
+const (
+	// ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value
+	ChecksumAlgorithmCrc32 = "CRC32"
+
+	// ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value
+	ChecksumAlgorithmCrc32c = "CRC32C"
+
+	// ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value
+	ChecksumAlgorithmSha1 = "SHA1"
+
+	// ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value
+	ChecksumAlgorithmSha256 = "SHA256"
+)
+
+// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum
+func ChecksumAlgorithm_Values() []string {
+	return []string{
+		ChecksumAlgorithmCrc32,
+		ChecksumAlgorithmCrc32c,
+		ChecksumAlgorithmSha1,
+		ChecksumAlgorithmSha256,
+	}
+}
+
+const (
+	// ChecksumModeEnabled is a ChecksumMode enum value
+	ChecksumModeEnabled = "ENABLED"
+)
+
+// ChecksumMode_Values returns all elements of the ChecksumMode enum
+func ChecksumMode_Values() []string {
+	return []string{
+		ChecksumModeEnabled,
+	}
+}
+
+const (
+	// CompressionTypeNone is a CompressionType enum value
+	CompressionTypeNone = "NONE"
+
+	// CompressionTypeGzip is a CompressionType enum value
+	CompressionTypeGzip = "GZIP"
+
+	// CompressionTypeBzip2 is a CompressionType enum value
+	CompressionTypeBzip2 = "BZIP2"
+)
+
+// CompressionType_Values returns all elements of the CompressionType enum
+func CompressionType_Values() []string {
+	return []string{
+		CompressionTypeNone,
+		CompressionTypeGzip,
+		CompressionTypeBzip2,
+	}
+}
+
+const (
+	// DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
+	DeleteMarkerReplicationStatusEnabled = "Enabled"
+
+	// DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
+	DeleteMarkerReplicationStatusDisabled = "Disabled"
+)
+
+// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum
+func DeleteMarkerReplicationStatus_Values() []string {
+	return []string{
+		DeleteMarkerReplicationStatusEnabled,
+		DeleteMarkerReplicationStatusDisabled,
+	}
+}
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, the XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+	// EncodingTypeUrl is a EncodingType enum value
+	EncodingTypeUrl = "url"
+)
+
+// EncodingType_Values returns all elements of the EncodingType enum
+func EncodingType_Values() []string {
+	return []string{
+		EncodingTypeUrl,
+	}
+}
+
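+// Example (an illustrative sketch, not part of the generated API): the *_Values
+// functions above and below are convenient for validating a user-supplied
+// string against an enum before building a request. The algo variable is an
+// assumed input:
+//
+//	valid := false
+//	for _, v := range s3.ChecksumAlgorithm_Values() {
+//		if v == algo {
+//			valid = true
+//			break
+//		}
+//	}
+
+// The bucket event for which to send notifications.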
+const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" + + // EventS3ObjectRestoreDelete is a Event enum value + EventS3ObjectRestoreDelete = "s3:ObjectRestore:Delete" + + // EventS3LifecycleTransition is a Event enum value + EventS3LifecycleTransition = "s3:LifecycleTransition" + + // EventS3IntelligentTiering is a Event enum value + EventS3IntelligentTiering = "s3:IntelligentTiering" + + // EventS3ObjectAclPut is a Event enum value + EventS3ObjectAclPut = "s3:ObjectAcl:Put" + + // EventS3LifecycleExpiration is a Event enum value + EventS3LifecycleExpiration = "s3:LifecycleExpiration:*" + + // EventS3LifecycleExpirationDelete is a Event enum value + EventS3LifecycleExpirationDelete = "s3:LifecycleExpiration:Delete" + + // EventS3LifecycleExpirationDeleteMarkerCreated is a Event enum value + EventS3LifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated" + + // EventS3ObjectTagging is a Event enum value + EventS3ObjectTagging = "s3:ObjectTagging:*" + + // EventS3ObjectTaggingPut is a Event enum value + EventS3ObjectTaggingPut = "s3:ObjectTagging:Put" + + // EventS3ObjectTaggingDelete is a Event enum value + EventS3ObjectTaggingDelete = "s3:ObjectTagging:Delete" +) + +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + 
EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + EventS3ObjectRestoreDelete, + EventS3LifecycleTransition, + EventS3IntelligentTiering, + EventS3ObjectAclPut, + EventS3LifecycleExpiration, + EventS3LifecycleExpirationDelete, + EventS3LifecycleExpirationDeleteMarkerCreated, + EventS3ObjectTagging, + EventS3ObjectTaggingPut, + EventS3ObjectTaggingDelete, + } +} + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" +) + +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const 
( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +// InventoryFormat_Values returns all elements of the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + 
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" + + // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value + InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" + + // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value + InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" +) + +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + InventoryOptionalFieldBucketKeyStatus, + InventoryOptionalFieldChecksumAlgorithm, + } +} + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + +// MetricsStatus_Values returns all elements of the MetricsStatus enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + +const ( + // ObjectAttributesEtag is a ObjectAttributes enum value + ObjectAttributesEtag = "ETag" + + // ObjectAttributesChecksum is a ObjectAttributes enum value + ObjectAttributesChecksum = "Checksum" + + // ObjectAttributesObjectParts is a ObjectAttributes enum value + ObjectAttributesObjectParts = "ObjectParts" + + // ObjectAttributesStorageClass is a ObjectAttributes enum value + ObjectAttributesStorageClass = "StorageClass" + + // ObjectAttributesObjectSize is a ObjectAttributes enum value + ObjectAttributesObjectSize = "ObjectSize" +) + +// 
ObjectAttributes_Values returns all elements of the ObjectAttributes enum +func ObjectAttributes_Values() []string { + return []string{ + ObjectAttributesEtag, + ObjectAttributesChecksum, + ObjectAttributesObjectParts, + ObjectAttributesStorageClass, + ObjectAttributesObjectSize, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. 
+// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. +// +// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer +// affect permissions. The bucket owner automatically owns and has full control +// over every object in the bucket. The bucket only accepts PUT requests that +// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control +// canned ACL or an equivalent form of this ACL expressed in the XML format. +const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" + + // ObjectOwnershipBucketOwnerEnforced is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerEnforced = "BucketOwnerEnforced" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + ObjectOwnershipBucketOwnerEnforced, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" + + // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value + ObjectStorageClassGlacierIr = "GLACIER_IR" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + ObjectStorageClassGlacierIr, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = 
"BucketOwner" +) + +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + } +} + +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + +// ReplicationTimeStatus_Values returns all elements of the 
ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from Requester Pays buckets, see Downloading Objects +// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 User Guide. +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + } +} + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" + + // StorageClassGlacierIr is a StorageClass enum value + StorageClassGlacierIr = "GLACIER_IR" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() 
[]string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + StorageClassDeepArchive, + StorageClassOutposts, + StorageClassGlacierIr, + } +} + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" + + // TransitionStorageClassGlacierIr is a TransitionStorageClass enum value + TransitionStorageClassGlacierIr = "GLACIER_IR" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + TransitionStorageClassGlacierIr, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 0000000..407f06b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/aws/request" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation set +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provide header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +// Adds the x-amz-te: append_md5 header to the request. This requests the service +// responds with a trailing MD5 checksum. +// +// Will not ask for append MD5 if disabled, the request is presigned or, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength hiden the trailing MD5 checksum. 
+ tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 0000000..9ba8a78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,107 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 0000000..229606b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,89 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset { + if aws.BoolValue(c.Config.UseDualStack) { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + } else { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled + } + } + + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(endpointHandler) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + 
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) +} + +func defaultInitRequestFn(r *request.Request) { + // Add request handlers for specific platforms. + // e.g. 100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + case opWriteGetObjectResponse: + r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint) + } +} + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} + +// endpointARNGetter is an accessor interface to grab +// the field corresponding to an endpoint ARN input. +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 0000000..0def022 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 0000000..7f7aca2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,110 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader supports both +// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrent downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in.
+// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// See the s3crypto package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 0000000..f11bd9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,299 @@ +package s3 + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + s3Namespace = "s3" + s3AccessPointNamespace = "s3-accesspoint" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} + } + case "outpost": + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + return parseOutpostAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +// parseOutpostAccessPointResource attempts to parse the ARN's resource as an +// outpost access-point resource.
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + Resource: resource, + Request: req, + } + + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // 
outposts does not support FIPS regions + if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled { + req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, + aws.StringValue(req.Config.Region), nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided. + // If this is an input validation error, the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { + // DualStack not supported + if isUseDualStackEndpoint(req) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Dualstack not supported + if isUseDualStackEndpoint(req) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +func buildWriteGetObjectResponseEndpoint(req *request.Request) { + // DualStack not supported + if isUseDualStackEndpoint(req) { + req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) + return + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + req.Error =
awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} + +func isUseDualStackEndpoint(req *request.Request) bool { + if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled + } + return aws.BoolValue(req.Config.UseDualStack) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 0000000..7583be6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,242 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. 
+// +func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // dual stack provided by endpoint resolver + updateS3HostForS3AccessPoint(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. +// +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. 
+type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. +// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name. +// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == s3OutpostsNamespace { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + updateHostPrefix(req, endpointsID, resolveService) + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint + opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + opts.ResolvedRegion = resolvedRegion + opts.Logger = r.Config.Logger + opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} + +func updateS3HostForS3AccessPoint(req *request.Request) { + updateHostPrefix(req, "s3", s3AccessPointNamespace) +} + +func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { + updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) +} + +func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.HTTPRequest.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + //
replace the service host label oldEndpointPrefix with newEndpointPrefix + req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 0000000..cd6a2e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all Amazon Web Services Regions except in the North + // Virginia Region. For legacy compatibility, if you re-create an existing bucket + // that you already own in the North Virginia Region, Amazon S3 returns 200 + // OK and resets the bucket access control lists (ACLs). + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This action is not allowed against this storage tier. + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY action is not in the active tier and is only + // stored in Amazon S3 Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 0000000..81cdec1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,136 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true if the Request's operation name is not +// in the blacklist. False otherwise.
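+//
+// A minimal sketch: given the accelerateOpBlacklist defined below, Continue
+// returns false for ListBuckets, CreateBucket, and DeleteBucket requests, so
+// the accelerate endpoint rewrite is skipped for those operations, and true
+// for any other operation, e.g. PutObject.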
+func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." +func updateEndpointForS3Config(r *request.Request, bucketName string) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r, bucketName) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r, bucketName) + } +} + +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), + nil) + return + } + + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible.
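+//
+// Illustrative results given the reDomain and reIPAddress patterns above and
+// the ".." check in this function:
+//
+//    dnsCompatibleBucketName("my-bucket")   // true
+//    dnsCompatibleBucketName("My_Bucket")   // false, fails reDomain
+//    dnsCompatibleBucketName("192.168.0.1") // false, matches reIPAddress
+//    dnsCompatibleBucketName("my..bucket")  // false, contains ".."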
+func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." + u.Host + removeBucketFromPath(u) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 0000000..308b7d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,9 @@ +//go:build !go1.6 +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 0000000..339019d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,29 @@ +//go:build go1.6 +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 0000000..9486f24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// modify or mutate any of the struct's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a S3 client from just a session.
+// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "s3" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2006-03-01", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. 
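+//
+// An illustrative sketch of how a generated operation method uses it (not
+// verbatim generated code):
+//
+//    op := &request.Operation{Name: opGetObject, HTTPMethod: "GET", HTTPPath: "/{Bucket}/{Key+}"}
+//    req := c.newRequest(op, input, output)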
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 0000000..57a0bd9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,84 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } +} + +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() + } + + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or set the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return + } + + // In the backwards compatible case, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set.
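+ // S3 uses this digest as a transport integrity check of the SSE-C key: the
+ // service recomputes base64(md5(key)) and rejects the request on a mismatch.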
+ if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 0000000..096adc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarshalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body + // even in case of an error, so we replace it with an empty Reader. + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil)) + return + } + + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { + r.Error = nil + return + } + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 0000000..6eecf66 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
+    if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+        msg := fmt.Sprintf(
+            "incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+            aws.StringValue(r.Config.Region),
+            aws.StringValue(r.Config.Endpoint),
+        )
+        if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+            msg += fmt.Sprintf(", bucket is in '%s' region", v)
+        }
+        r.Error = awserr.NewRequestFailure(
+            awserr.New("BucketRegionError", msg, nil),
+            r.HTTPResponse.StatusCode,
+            r.RequestID,
+        )
+        return
+    }
+
+    // Attempt to parse error from body if it is known
+    var errResp xmlErrorResponse
+    var err error
+    if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+        err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+    } else {
+        err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+    }
+
+    if err != nil {
+        var errorMsg string
+        if err == io.EOF {
+            errorMsg = "empty response payload"
+        } else {
+            errorMsg = "failed to unmarshal error message"
+        }
+
+        r.Error = awserr.NewRequestFailure(
+            awserr.New(request.ErrCodeSerialization,
+                errorMsg, err),
+            r.HTTPResponse.StatusCode,
+            r.RequestID,
+        )
+        return
+    }
+
+    // Fallback to status code converted to message if still no error code
+    if len(errResp.Code) == 0 {
+        statusText := http.StatusText(r.HTTPResponse.StatusCode)
+        errResp.Code = strings.Replace(statusText, " ", "", -1)
+        errResp.Message = statusText
+    }
+
+    r.Error = awserr.NewRequestFailure(
+        awserr.New(errResp.Code, errResp.Message, err),
+        r.HTTPResponse.StatusCode,
+        r.RequestID,
+    )
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+    awserr.RequestFailure
+
+    // HostID is the S3 Host ID needed for debugging and contacting support
+    HostID() string
+}
+
+// s3unmarshalXMLError is the S3-specific XML error unmarshaler
+// for 200 OK errors and response payloads.
+// This function differs from the xmlutil.UnmarshalXMLError
+// func in that it does not ignore io.EOF, but passes it up to the caller.
+// Related to bug fix for `s3 200 OK response with empty payload`
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+    var errBuf bytes.Buffer
+    body := io.TeeReader(stream, &errBuf)
+
+    err := xml.NewDecoder(body).Decode(v)
+    if err != nil && err != io.EOF {
+        return awserr.NewUnmarshalError(err,
+            "failed to unmarshal error message", errBuf.Bytes())
+    }
+
+    return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 0000000..2596c69
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+    return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 0000000..948f060 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1354 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
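+
+// A hedged usage sketch for the operations defined in this file (the session
+// setup follows the SDK's usual pattern; the token, account, and role values
+// are placeholders):
+//
+//     sess := session.Must(session.NewSession())
+//     svc := sso.New(sess)
+//     out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
+//         AccessToken: aws.String(token), // token issued by the sso-oidc CreateToken API
+//         AccountId:   aws.String("123456789012"),
+//         RoleName:    aws.String("MyRole"),
+//     })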
+ +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. 
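+//
+// A hedged error-handling sketch using the error codes declared in this
+// package (svc and input are assumed to be a prepared client and request input):
+//
+//     if _, err := svc.ListAccountRoles(input); err != nil {
+//         if aerr, ok := err.(awserr.Error); ok &&
+//             aerr.Code() == sso.ErrCodeResourceNotFoundException {
+//             // no such account or role assignment
+//         }
+//     }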
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the client- and server-side session that is associated with the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. 
+// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. 
+ // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. 
+func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountRolesInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. 
For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { + s.NextToken = &v + return s +} + +type ListAccountsOutput struct { + _ struct{} `type:"structure"` + + // A paginated response with the list of account information and the next token + // if more results are available. + AccountList []*AccountInfo `locationName:"accountList" type:"list"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ListAccountsOutput) GoString() string { + return s.String() +} + +// SetAccountList sets the AccountList field's value. +func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { + s.AccountList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { + s.NextToken = &v + return s +} + +type LogoutInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by LogoutInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogoutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { + s.AccessToken = &v + return s +} + +type LogoutOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. + Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedException) Code() string { + return "UnauthorizedException" +} + +// Message returns the exception's message. +func (s *UnauthorizedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedException) OrigErr() error { + return nil +} + +func (s *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go new file mode 100644 index 0000000..92d82b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sso provides the client and types for making API +// requests to AWS Single Sign-On. +// +// AWS Single Sign-On Portal is a web service that makes it easy for you to +// assign user access to AWS SSO resources such as the user portal. Users can +// get AWS account applications and roles assigned to them and get federated +// into the application. 
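+//
+// A hedged usage sketch (placeholders throughout; the access token comes from
+// the sso-oidc CreateToken API):
+//
+//     sess := session.Must(session.NewSession())
+//     svc := sso.New(sess)
+//     err := svc.ListAccountsPages(&sso.ListAccountsInput{
+//         AccessToken: aws.String(token),
+//     }, func(page *sso.ListAccountsOutput, lastPage bool) bool {
+//         for _, acct := range page.AccountList {
+//             fmt.Println(aws.StringValue(acct.AccountId))
+//         }
+//         return true // keep iterating until the last page
+//     })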
+//
+// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the AWS SSO User Guide.
+//
+// This API reference guide describes the AWS SSO Portal operations that you
+// can call programmatically and includes detailed information on data types
+// and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to AWS SSO and other
+// AWS services. For more information about the AWS SDKs, including how to download
+// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// Using the Client
+//
+// To contact AWS Single Sign-On with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating the client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 0000000..77a6792
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that a problem occurred with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource doesn't exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Indicates that the request is being made too frequently and is more than
+	// what the server can handle.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+	// ErrCodeUnauthorizedException for service response error code
+	// "UnauthorizedException".
+	//
+	// Indicates that the request is not authorized. This can happen due to an invalid
+	// access token in the request.
+	ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"InvalidRequestException":   newErrorInvalidRequestException,
+	"ResourceNotFoundException": newErrorResourceNotFoundException,
+	"TooManyRequestsException":  newErrorTooManyRequestsException,
+	"UnauthorizedException":     newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 0000000..7a28dc7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,105 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type SSO struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO"        // Name of service.
+	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a SSO client from just a session.
+//     svc := sso.New(mySession)
+//
+//     // Create a SSO client with additional configuration
+//     svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "awsssoportal"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
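+//
+// The UnmarshalError handler registered below maps service error codes to the
+// typed errors declared in this package's errors.go, so callers can branch on
+// them with a runtime type assertion. A minimal sketch, assuming svc is an
+// initialized client and input a prepared ListAccountsInput:
+//
+//    out, err := svc.ListAccounts(input)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sso.ErrCodeUnauthorizedException {
+//            // the access token was rejected; obtain a fresh token and retry
+//        }
+//    }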
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
+	svc := &SSO{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2019-06-10",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(
+		protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+	)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 0000000..4cac247
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Single Sign-On.
+//    func myFunc(svc ssoiface.SSOAPI) bool {
+//        // Make svc.GetRoleCredentials request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sso.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSSOClient struct {
+//        ssoiface.SSOAPI
+//    }
+//    func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSSOClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type SSOAPI interface {
+	GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
+	GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
+	GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
+
+	ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
+	ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
+	ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
+
+	ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
+	ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
+
+	ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
+	ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
+	ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
+
+	ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
+	ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
+
+	Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
+	LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
+	LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
+}
+
+var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 0000000..f1a7bfd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,3445 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
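+//
+// When Send fails, the returned error can usually be asserted to awserr.Error
+// to inspect the service error code. A minimal sketch (the error code shown is
+// just one of the codes documented for this operation):
+//
+//    req, resp := client.AssumeRoleRequest(params)
+//    if err := req.Send(); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case ErrCodeMalformedPolicyDocumentException:
+//                // the supplied session policy could not be parsed
+//            default:
+//                fmt.Println(aerr.Code(), aerr.Message())
+//            }
+//        }
+//    }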
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources that you might not normally have access to. +// These temporary credentials consist of an access key ID, a secret access +// key, and a security token. Typically, you use AssumeRole within your account +// or for cross-account access. For a comparison of AssumeRole with other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: +// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// When you create a role, you create two policies: A role trust policy that +// specifies who can assume the role and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal who is +// allowed to assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. 
That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of
+// the following:
+//
+//    * Attach a policy to the user that allows the user to call AssumeRole
+//    (as long as the role's trust policy trusts the account).
+//
+//    * Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the
+// same account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see Passing
+// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an Amazon
+// Web Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+//    "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
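+//
+// As an illustration only, an MFA-protected call might look like the following
+// minimal sketch, where svc is a client from New and every value shown is a
+// placeholder:
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//        TokenCode:       aws.String("123456"),
+//        DurationSeconds: aws.Int64(900),
+//    })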
+// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based Amazon Web +// Services access without user-specific credentials or configuration. For a +// comparison of AssumeRoleWithSAML with the other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to Amazon Web +// Services services. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. 
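+//
+// For illustration, a session-duration override might look like the following
+// minimal sketch (the ARNs and assertion are placeholders; the assertion is
+// the base64-encoded response from your IdP):
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//        SAMLAssertion:   aws.String("<base64-encoded SAML assertion>"),
+//        DurationSeconds: aws.Int64(7200),
+//    })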
+// +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys +// in the metadata document that is uploaded for the SAML provider entity for +// your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. +// The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. 
+// +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. +// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. +// Additionally, you must use Identity and Access Management (IAM) to create +// a SAML provider entity in your Amazon Web Services account that represents +// your identity provider. You must also create an IAM role that specifies this +// SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID +// Connect-compatible identity provider such as Google or Amazon Cognito federated +// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide +// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android +// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify +// a user. You can also supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito +// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the Amazon Web Services SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. Therefore, you can distribute an application +// (for example, on mobile devices) that requests temporary security credentials +// without including long-term Amazon Web Services credentials in the application. +// You also don't need to deploy server-based proxy services that use long-term +// Amazon Web Services credentials. Instead, the identity of the caller is validated +// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. 
Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service +// API operations. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. 
+// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. +// +// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
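+//
+// For illustration, a minimal call might look like the following sketch (the
+// ARN and token are placeholders; as noted above, no long-term Amazon Web
+// Services credentials are required for this operation):
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"),
+//        RoleSessionName:  aws.String("web-identity-session"),
+//        WebIdentityToken: aws.String("<OIDC ID token from your provider>"),
+//    })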
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some Amazon Web Services operations additionally +// return an encoded message that can provide details about this authorization +// failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. 
The documentation for an individual operation indicates whether +// that operation returns an encoded message in addition to returning an HTTP +// code. +// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation +// should not see. To decode an authorization status message, a user must be +// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) +// action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetAccessKeyInfoRequest method.
+//    req, resp := client.GetAccessKeyInfoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+	op := &request.Operation{
+		Name:       opGetAccessKeyInfo,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccessKeyInfoInput{}
+	}
+
+	output = &GetAccessKeyInfoOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs
+// to you, you can sign in as the root user and review your root user access
+// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
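+//
+// An illustrative usage sketch (not generated documentation; the client value
+// "svc" and the key ID below are assumptions for the example):
+//
+//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.GetAccessKeyInfoWithContext(ctx, &sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account)) // account that owns the key
+//    }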
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetCallerIdentityRequest method.
+//    req, resp := client.GetCallerIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// adds a policy to your IAM user or role that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when an IAM user
+// or role is denied access. To view an example response, see I Am Not Authorized
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
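+//
+// An illustrative usage sketch (not generated documentation; "svc" is an
+// assumed *sts.STS client):
+//
+//    out, err := svc.GetCallerIdentityWithContext(aws.BackgroundContext(),
+//        &sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Arn)) // ARN of the calling identity
+//    }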
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetFederationTokenRequest method.
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
+//
+// You can also call GetFederationToken using the security credentials of an
+// Amazon Web Services account root user, but we do not recommend it. Instead,
+// we recommend that you create an IAM user for the purpose of the proxy application.
+// Then attach a policy to the IAM user that limits federated users to only
+// the actions and resources that they need to access. For more information,
+// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials obtained
+// by using the Amazon Web Services account root user credentials have a maximum
+// duration of 3,600 seconds (1 hour).
+//
+// Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service except the following:
+//
+//    * You cannot call any IAM operations using the CLI or the Amazon Web Services
+//    API.
+//
+//    * You cannot call any STS operations except GetCallerIdentity.
+//
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plaintext that you use for both inline
+// and managed session policies can't exceed 2,048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM
+// user policies and the session policies that you pass. This gives you a way
+// to further restrict the permissions for a federated user. You cannot use
+// session policies to grant more permissions than those that are defined in
+// the permissions policy of the IAM user. For more information, see Session
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session
+// in the Principal element of the policy, the session has the permissions allowed
+// by the policy. These permissions are granted in addition to the permissions
+// granted by the session policies.
+//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see Passing Session
+// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
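+//
+// An illustrative sketch (not generated documentation; "svc" and the policy
+// JSON value "examplePolicyJSON" are assumptions): requesting federated
+// credentials with a single session tag:
+//
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:   aws.String("example-federated-user"),
+//        Policy: aws.String(examplePolicyJSON),
+//        Tags: []*sts.Tag{
+//            {Key: aws.String("Department"), Value: aws.String("Marketing")},
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials.Expiration)
+//    }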
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This
+// means that you cannot have separate Department and department tag keys. Assume
+// that the user that you are federating has the Department=Marketing tag and
+// you pass the department=engineering session tag. Department and department
+// are not saved as separate tags, and the session tag passed in the request
+// takes precedence over the user tag.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the total packed size of the session policies
+//   and session tags combined was too large. An Amazon Web Services conversion
+//   compresses the session policy document, session policy ARNs, and session
+//   tags into a packed binary format that has a separate limit. The error message
+//   indicates by percentage how close the policies and tags are to the upper
+//   size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+//   in the IAM User Guide.
+//
+//   You could receive this error even though you meet other defined session policy
+//   and session tag limits. For more information, see IAM and STS Entity Character
+//   Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+//   in the IAM User Guide.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//   (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
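+//
+// An illustrative error-handling sketch (not generated documentation; "svc"
+// and "input" are assumed): distinguishing the error codes above with a type
+// assertion on awserr.Error:
+//
+//    _, err := svc.GetFederationToken(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case sts.ErrCodeMalformedPolicyDocumentException:
+//            // handle a malformed session policy
+//        default:
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }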
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an Amazon Web Services account
+// or IAM user. The credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use GetSessionToken if you want
+// to use MFA to protect programmatic calls to specific Amazon Web Services
+// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would
+// need to call GetSessionToken and submit an MFA code that is associated with
+// their MFA device. Using the temporary security credentials that are returned
+// from the call, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. If you do not supply a correct MFA code,
+// then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// No permissions are required for users to perform this operation. The purpose
+// of the sts:GetSessionToken operation is to authenticate the user using MFA.
+// You cannot use policies to control authentication operations. For more information,
+// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
+// in the IAM User Guide.
+//
+// Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon
+// Web Services security credentials of the Amazon Web Services account root
+// user or an IAM user. Credentials that are created by IAM users are valid
+// for the duration that you specify. This duration can range from 900 seconds
+// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default
+// of 43,200 seconds (12 hours). Credentials based on account credentials can
+// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
+// default of 1 hour.
+//
+// Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any Amazon Web Services service with the following exceptions:
+//
+//    * You cannot call any IAM API operations unless MFA authentication information
+//    is included in the request.
+//
+//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with Amazon Web Services
+// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with Amazon Web Services.
+//
+// The credentials that are returned by GetSessionToken are based on permissions
+// associated with the user whose credentials were used to call the operation.
+// If GetSessionToken is called using Amazon Web Services account root user
+// credentials, the temporary credentials have root user permissions. Similarly,
+// if GetSessionToken is called using the credentials of an IAM user, the temporary
+// credentials have the same permissions as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//   (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value specified can range
+	// from 900 seconds (15 minutes) up to the maximum session duration set for
+	// the role. The maximum session duration setting can have a value from 1 hour
+	// to 12 hours. If you specify a value higher than this setting or the administrator
+	// setting (whichever is lower), the operation fails. For example, if you specify
+	// a session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails.
+	//
+	// Role chaining limits your Amazon Web Services CLI or Amazon Web Services
+	// API role session to a maximum of one hour. When you use the AssumeRole API
+	// operation to assume a role, you can specify the duration of your role session
+	// with the DurationSeconds parameter. You can specify a parameter value of
+	// up to 43200 seconds (12 hours), depending on the maximum session duration
+	// setting for your role. However, if you assume a role using role chaining
+	// and provide a DurationSeconds parameter value greater than one hour, the
+	// operation fails. To learn how to view the maximum value for your role, see
+	// View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that might be required when you assume a role in another
+	// account. If the administrator of the account to which the role belongs provided
+	// you with an external ID, then provide that value in the ExternalId parameter.
+	// This value can be any string, such as a passphrase or account number. A cross-account
+	// role is usually set up to trust everyone in an account. Therefore, the administrator
+	// of the trusting account might send an external ID to the administrator of
+	// the trusted account. That way, only someone with the ID can assume the role,
+	// rather than everyone in the account. For more information about the external
+	// ID, see How to Use an External ID When Granting Access to Your Amazon Web
+	// Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// This parameter is optional. Passing policies to this operation returns new
+	// temporary credentials. The resulting session's permissions are the intersection
+	// of the role's identity-based policy and the session policies. You can use
+	// the role's temporary credentials in subsequent Amazon Web Services API calls
+	// to access resources in the account that owns the role. You cannot use session
+	// policies to grant more permissions than those allowed by the identity-based
+	// policy of the role that is being assumed. For more information, see Session
+	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+	// to use as managed session policies. The policies must exist in the same account
+	// as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see Amazon
+	// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the Amazon Web Services General Reference.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent Amazon Web Services API calls to access resources in the account
+	// that owns the role. You cannot use session policies to grant more permissions
+	// than those allowed by the identity-based policy of the role that is being
+	// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*PolicyDescriptorType `type:"list"`
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session.
+	//
+	// Use the role session name to uniquely identify a session when the same role
+	// is assumed by different principals or for different reasons. In cross-account
+	// scenarios, the role session name is visible to, and can be logged by the
+	// account that owns the role. The role session name is also used in the ARN
+	// of the assumed role principal. This means that subsequent cross-account API
+	// requests that use the temporary security credentials will expose the role
+	// session name to the external account in their CloudTrail logs.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-
+	//
+	// RoleSessionName is a required field
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation.
+	//
+	// You can require users to specify a source identity when they assume a role.
+	// You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. You can use source identity information in CloudTrail logs to determine
+	// who took actions with a role. You can use the aws:SourceIdentity condition
+	// key to further control access to Amazon Web Services resources based on the
+	// value of source identity. For more information about using source identity,
+	// see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-. You cannot
+	// use a value that begins with the text aws:. This prefix is reserved for Amazon
+	// Web Services internal use.
+	SourceIdentity *string `min:"2" type:"string"`
+
+	// A list of session tags that you want to pass. Each session tag consists of
+	// a key name and an associated value. For more information about session tags,
+	// see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+	// in the IAM User Guide.
+	//
+	// This parameter is optional. You can pass up to 50 session tags. The plaintext
+	// session tag keys can’t exceed 128 characters, and the values can’t exceed
+	// 256 characters. For these and additional limits, see IAM and STS Character
+	// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+	// in the IAM User Guide.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	//
+	// You can pass a session tag with the same key as a tag that is already attached
+	// to the role. When you do, session tags override a role tag with the same
+	// key.
+	//
+	// Tag key–value pairs are not case sensitive, but case is preserved. This
+	// means that you cannot have separate Department and department tag keys. Assume
+	// that the role has the Department=Marketing tag and you pass the department=engineering
+	// session tag. Department and department are not saved as separate tags, and
+	// the session tag passed in the request takes precedence over the role tag.
+	//
+	// Additionally, if you used temporary credentials to perform this operation,
+	// the new session inherits any transitive session tags from the calling session.
+	// If you pass a session tag with the same key as an inherited tag, the operation
+	// fails. To view the inherited tags for a session, see the CloudTrail logs.
+	// For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs)
+	// in the IAM User Guide.
+	Tags []*Tag `type:"list"`
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA. (In other words, if the policy includes a condition
+	// that tests for MFA). If the role being assumed requires MFA and if the TokenCode
+	// value is missing or expired, the AssumeRole call returns an "access denied"
+	// error.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string `min:"6" type:"string"`
+
+	// A list of keys for session tags that you want to set as transitive. If you
+	// set a tag key as transitive, the corresponding key and value pass to subsequent
+	// sessions in a role chain. For more information, see Chaining Roles with Session
+	// Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+	// in the IAM User Guide.
+	//
+	// This parameter is optional. When you set session tags as transitive, the
+	// session policy and session tags packed binary limit is not affected.
+	//
+	// If you choose not to specify a transitive tag key, then no tags are passed
+	// from this session to any subsequent sessions.
+	TransitiveTagKeys []*string `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.RoleSessionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+	}
+	if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+	}
+	if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2))
+	}
+	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+	}
+	if s.PolicyArns != nil {
+		for i, v := range s.PolicyArns {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetExternalId sets the ExternalId field's value.
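+//
+// These fluent setters can be chained; an illustrative sketch (the ARN and
+// names below are placeholders, not part of the generated API):
+//
+//    input := (&sts.AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/example-role").
+//        SetRoleSessionName("example-session").
+//        SetExternalId("example-external-id")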
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+	s.ExternalId = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+	s.Policy = &v
+	return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
+	s.PolicyArns = v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+	s.RoleArn = &v
+	return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+	s.RoleSessionName = &v
+	return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+	s.SerialNumber = &v
+	return s
+}
+
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput {
+	s.SourceIdentity = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput {
+	s.Tags = v
+	return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+	s.TokenCode = &v
+	return s
+}
+
+// SetTransitiveTagKeys sets the TransitiveTagKeys field's value.
+func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput {
+	s.TransitiveTagKeys = v
+	return s
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// Amazon Web Services credentials that can be used to make Amazon Web Services
+// requests.
+type AssumeRoleOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// The size of the security token that STS API operations return is not fixed.
+	// We strongly recommend that you make no assumptions about the maximum size.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the packed size of the session policies
+	// and session tags combined passed in the request. The request fails if the
+	// packed size is greater than 100 percent, which means the policies and tags
+	// exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation.
+	//
+	// You can require users to specify a source identity when they assume a role.
+	// You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. You can use source identity information in CloudTrail logs to determine
+	// who took actions with a role. You can use the aws:SourceIdentity condition
+	// key to further control access to Amazon Web Services resources based on the
+	// value of source identity. For more information about using source identity,
+	// see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-
+	SourceIdentity *string `min:"2" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleOutput) GoString() string {
+	return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+	s.AssumedRoleUser = v
+	return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+	s.Credentials = v
+	return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+	s.PackedPolicySize = &v
+	return s
+}
+
+// SetSourceIdentity sets the SourceIdentity field's value.
+func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput {
+	s.SourceIdentity = &v
+	return s
+}
+
+type AssumeRoleWithSAMLInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. Your role session lasts for
+	// the duration that you specify for the DurationSeconds parameter, or until
+	// the time specified in the SAML authentication response's SessionNotOnOrAfter
+	// value, whichever is shorter. You can provide a DurationSeconds value from
+	// 900 seconds (15 minutes) up to the maximum session duration setting for the
+	// role. This setting can have a value from 1 hour to 12 hours. If you specify
+	// a value higher than this setting, the operation fails. For example, if you
+	// specify a session duration of 12 hours, but your administrator set the maximum
+	// session duration to 6 hours, your operation fails. To learn how to view the
+	// maximum value for your role, see View the Maximum Session Duration Setting
+	// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// This parameter is optional. Passing policies to this operation returns new
+	// temporary credentials. The resulting session's permissions are the intersection
+	// of the role's identity-based policy and the session policies. You can use
+	// the role's temporary credentials in subsequent Amazon Web Services API calls
+	// to access resources in the account that owns the role. You cannot use session
+	// policies to grant more permissions than those allowed by the identity-based
+	// policy of the role that is being assumed. For more information, see Session
+	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+	// to use as managed session policies. The policies must exist in the same account
+	// as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see Amazon
+	// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the Amazon Web Services General Reference.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent Amazon Web Services API calls to access resources in the account
+	// that owns the role. You cannot use session policies to grant more permissions
+	// than those allowed by the identity-based policy of the role that is being
+	// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*PolicyDescriptorType `type:"list"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	//
+	// PrincipalArn is a required field
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the IAM User Guide.
+	//
+	// SAMLAssertion is a required field
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.PrincipalArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+	}
+	if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.SAMLAssertion == nil {
+		invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+	}
+	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+	}
+	if s.PolicyArns != nil {
+		for i, v := range s.PolicyArns {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+	s.Policy = &v
+	return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. 
For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. 
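+//
+// As a brief illustration, assuming out is a *AssumeRoleWithSAMLOutput from
+// a successful call, the temporary credentials and SAML-derived fields are
+// read through their pointer fields:
+//
+//     if out.Credentials != nil {
+//         fmt.Println(*out.Credentials.AccessKeyId, out.Credentials.Expiration)
+//     }
+//     if out.SubjectType != nil && *out.SubjectType == "transient" {
+//         // the IdP issued a transient NameID for this subject
+//     }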
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. 
+ // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. 
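+//
+// A minimal sketch, assuming idToken holds an OIDC ID token that the
+// application already obtained from its identity provider (the role ARN and
+// session name are placeholders):
+//
+//     input := (&AssumeRoleWithWebIdentityInput{}).
+//         SetRoleArn("arn:aws:iam::123456789012:role/web-demo").
+//         SetRoleSessionName("app-user-42").
+//         SetWebIdentityToken(idToken)
+//     if err := input.Validate(); err != nil {
+//         // surface parameter problems before calling the service
+//     }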
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by Amazon Web Services + // when the role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. 
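+//
+// As an informal sketch, assuming the aws/credentials package is imported
+// and c is a *Credentials value returned by STS, the temporary values can
+// seed a static provider for other SDK clients:
+//
+//     staticCreds := credentials.NewStaticCredentials(
+//         *c.AccessKeyId, *c.SecretAccessKey, *c.SessionToken)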
+func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // The API returns a response with the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the Amazon Web Services account. + Account *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
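+//
+// As an informal sketch, assuming svc is an *STS client (the key ID below is
+// the standard AWS documentation example value), GetAccessKeyInfo maps an
+// access key ID to the account that owns it:
+//
+//     out, err := svc.GetAccessKeyInfo((&GetAccessKeyInfoInput{}).
+//         SetAccessKeyId("AKIAIOSFODNN7EXAMPLE"))
+//     if err == nil && out.Account != nil {
+//         fmt.Println("key belongs to account", *out.Account)
+//     }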
+func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string `type:"string"` + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. 
Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using Amazon Web Services account root user credentials are restricted to + // a maximum of 3,600 seconds (one hour). If the specified duration is longer + // than one hour, the session obtained by using root user credentials defaults + // to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. 
The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plaintext that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. 
+ // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. 
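+//
+// For illustration, assuming svc is an *STS client (the user name and policy
+// ARN are placeholders), a federation token pairs a federated user name with
+// scoped-down session policies:
+//
+//     out, err := svc.GetFederationToken((&GetFederationTokenInput{}).
+//         SetName("Bob").
+//         SetPolicyArns([]*PolicyDescriptorType{
+//             (&PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"),
+//         }))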
+type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for Amazon Web Services account owners are restricted to a maximum of 3,600 + // seconds (one hour). If the duration is longer than one hour, the session + // for Amazon Web Services account owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. 
For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000..d5307fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 0000000..2d98d92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,32 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for Identity and Access Management (IAM) users or for users that +// you authenticate (federated users). This guide provides descriptions of the +// STS API. For more information about using this service, see Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. 
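+//
+// As a minimal sketch (session configuration and error handling elided),
+// a client is created once and then reused for calls such as
+// GetCallerIdentity:
+//
+//     svc := sts.New(session.Must(session.NewSession()))
+//     id, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//     if err == nil {
+//         fmt.Println(*id.Account, *id.Arn)
+//     }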
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 0000000..b680bbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,84 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+	// ErrCodeExpiredTokenException for service response error code
+	// "ExpiredTokenException".
+	//
+	// The web identity token that was passed is expired or is not valid. Get a
+	// new identity token from the identity provider and then retry the request.
+	ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+	// ErrCodeIDPCommunicationErrorException for service response error code
+	// "IDPCommunicationError".
+	//
+	// The request could not be fulfilled because the identity provider (IDP) that
+	// was asked to verify the incoming identity token could not be reached. This
+	// is often a transient error caused by network conditions. Retry the request
+	// a limited number of times so that you don't exceed the request rate. If the
+	// error persists, the identity provider might be down or not responding.
+	ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+	// ErrCodeIDPRejectedClaimException for service response error code
+	// "IDPRejectedClaim".
+	//
+	// The identity provider (IdP) reported that authentication failed. This might
+	// be because the claim is invalid.
+	//
+	// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+	// can also mean that the claim has expired or has been explicitly revoked.
+	ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+	// ErrCodeInvalidAuthorizationMessageException for service response error code
+	// "InvalidAuthorizationMessageException".
+	//
+	// The error returned if the message passed to DecodeAuthorizationMessage was
+	// invalid. This can happen if the token contains invalid characters, such as
+	// linebreaks.
+	ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+	// ErrCodeInvalidIdentityTokenException for service response error code
+	// "InvalidIdentityToken".
+	//
+	// The web identity token that was passed could not be validated by Amazon Web
+	// Services. Get a new identity token from the identity provider and then retry
+	// the request.
+	ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+	// ErrCodeMalformedPolicyDocumentException for service response error code
+	// "MalformedPolicyDocument".
+	//
+	// The request was rejected because the policy document was malformed. The error
+	// message describes the specific error.
+	ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+	// ErrCodePackedPolicyTooLargeException for service response error code
+	// "PackedPolicyTooLarge".
+	//
+	// The request was rejected because the total packed size of the session policies
+	// and session tags combined was too large. An Amazon Web Services conversion
+	// compresses the session policy document, session policy ARNs, and session
+	// tags into a packed binary format that has a separate limit. The error message
+	// indicates by percentage how close the policies and tags are to the upper
+	// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+	// in the IAM User Guide.
+	//
+	// You could receive this error even though you meet other defined session policy
+	// and session tag limits. For more information, see IAM and STS Entity Character
+	// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+	// in the IAM User Guide.
+	ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+	// ErrCodeRegionDisabledException for service response error code
+	// "RegionDisabledException".
+	//
+	// STS is not activated in the requested region for the account that is being
+	// asked to generate credentials. The account administrator must use the IAM
+	// console to activate STS in that region. For more information, see Activating
+	// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// in the IAM User Guide.
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 0000000..f324ff1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,103 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to look up a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create an STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create an STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
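+	// Use the endpoints ID as the signing name when the resolved config derived
+	// it or left it unset.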
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = EndpointsID
+		// No Fallback
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2011-06-15",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 0000000..e2e1d6e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/vendor/github.com/baidubce/bce-sdk-go/LICENSE b/vendor/github.com/baidubce/bce-sdk-go/LICENSE
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++
b/vendor/github.com/baidubce/bce-sdk-go/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go b/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go new file mode 100644 index 0000000..fb1e415 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/auth/credentials.go @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// credentials.go - the credentials data structure definition
+
+// Package auth implements the authorization functionality for BCE.
+// It uses the BCE access key ID and secret access key with the specific sign algorithm to generate
+// the authorization string. It also supports the temporary authorization by the STS token.
+package auth
+
+import "errors"
+
+// BceCredentials defines the data structure for authorization
+type BceCredentials struct {
+	AccessKeyId     string // access key id to the service
+	SecretAccessKey string // secret access key to the service
+	SessionToken    string // session token generated by the STS service
+}
+
+func (b *BceCredentials) String() string {
+	str := "ak: " + b.AccessKeyId + ", sk: " + b.SecretAccessKey
+	if len(b.SessionToken) != 0 {
+		return str + ", sessionToken: " + b.SessionToken
+	}
+	return str
+}
+
+func NewBceCredentials(ak, sk string) (*BceCredentials, error) {
+	if len(ak) == 0 {
+		return nil, errors.New("accessKeyId should not be empty")
+	}
+	if len(sk) == 0 {
+		return nil, errors.New("secretKey should not be empty")
+	}
+
+	return &BceCredentials{ak, sk, ""}, nil
+}
+
+func NewSessionBceCredentials(ak, sk, token string) (*BceCredentials, error) {
+	if len(token) == 0 {
+		return nil, errors.New("sessionToken should not be empty")
+	}
+
+	result, err := NewBceCredentials(ak, sk)
+	if err != nil {
+		return nil, err
+	}
+	result.SessionToken = token
+
+	return result, nil
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go b/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go
new file mode 100644
index 0000000..5734798
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/auth/signer.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// signer.go - implements the specific sign algorithm of BCE V1 protocol
+
+package auth
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+	"github.com/baidubce/bce-sdk-go/util/log"
+)
+
+var (
+	BCE_AUTH_VERSION        = "bce-auth-v1"
+	SIGN_JOINER             = "\n"
+	SIGN_HEADER_JOINER      = ";"
+	DEFAULT_EXPIRE_SECONDS  = 1800
+	DEFAULT_HEADERS_TO_SIGN = map[string]struct{}{
+		strings.ToLower(http.HOST):           {},
+		strings.ToLower(http.CONTENT_LENGTH): {},
+		strings.ToLower(http.CONTENT_TYPE):   {},
+		strings.ToLower(http.CONTENT_MD5):    {},
+	}
+)
+
+// Signer abstracts the entity that implements the `Sign` method
+type Signer interface {
+	// Sign the given Request with the Credentials and SignOptions
+	Sign(*http.Request, *BceCredentials, *SignOptions)
+}
+
+// SignOptions defines the data structure used by Signer
+type SignOptions struct {
+	HeadersToSign map[string]struct{}
+	Timestamp     int64
+	ExpireSeconds int
+}
+
+func (opt *SignOptions) String() string {
+	return fmt.Sprintf(`SignOptions [
+        HeadersToSign=%s;
+        Timestamp=%d;
+        ExpireSeconds=%d
+    ]`, opt.HeadersToSign, opt.Timestamp, opt.ExpireSeconds)
+}
+
+// BceV1Signer implements the v1 sign algorithm
+type BceV1Signer struct{}
+
+// Sign - generate the authorization string from the BceCredentials and SignOptions
+//
+// PARAMS:
+//     - req: *http.Request for this sign
+//     - cred: *BceCredentials to access the service
+//     - opt: *SignOptions for this sign algorithm
+func (b *BceV1Signer) Sign(req *http.Request, cred *BceCredentials, opt *SignOptions) {
+	if req == nil {
+		log.Fatal("request should not be null for sign")
+		return
+	}
+	if cred == nil {
+		log.Fatal("credentials should not be null for sign")
+		return
+	}
+
+	// Prepare parameters
+	accessKeyId := cred.AccessKeyId
+	secretAccessKey := cred.SecretAccessKey
+	signDate := util.FormatISO8601Date(util.NowUTCSeconds())
+	// Modify the sign time if it is not the default value but specified by client
+	if opt.Timestamp != 0 {
+		signDate = util.FormatISO8601Date(opt.Timestamp)
+	}
+
+	// Set security token if using session credentials
+	if len(cred.SessionToken) != 0 {
+		req.SetHeader(http.BCE_SECURITY_TOKEN, cred.SessionToken)
+	}
+
+	// Prepare the canonical request components
+	signKeyInfo := fmt.Sprintf("%s/%s/%s/%d",
+		BCE_AUTH_VERSION,
+		accessKeyId,
+		signDate,
+		opt.ExpireSeconds)
+	signKey := util.HmacSha256Hex(secretAccessKey, signKeyInfo)
+	canonicalUri := getCanonicalURIPath(req.Uri())
+	canonicalQueryString := getCanonicalQueryString(req.Params())
+	canonicalHeaders, signedHeadersArr := getCanonicalHeaders(req.Headers(), opt.HeadersToSign)
+
+	// Generate signed headers string
+	signedHeaders := ""
+	if len(signedHeadersArr) > 0 {
+		sort.Strings(signedHeadersArr)
+		signedHeaders = strings.Join(signedHeadersArr, SIGN_HEADER_JOINER)
+	}
+
+	// Generate signature
+	canonicalParts := []string{req.Method(), canonicalUri, canonicalQueryString, canonicalHeaders}
+	canonicalReq := strings.Join(canonicalParts, SIGN_JOINER)
+	log.Debug("CanonicalRequest data:\n" + canonicalReq)
+	signature := util.HmacSha256Hex(signKey, canonicalReq)
+
+	// Generate the auth string and add it to the request header
+	authStr := signKeyInfo + "/" + signedHeaders + "/" + signature
+	log.Info("Authorization=" + authStr)
+
+	req.SetHeader(http.AUTHORIZATION, authStr)
+}
+
+func getCanonicalURIPath(path string) string {
+	if len(path) == 0 {
+		return "/"
+	}
+	canonical_path := path
+	if strings.HasPrefix(path, "/") {
+		canonical_path = path[1:]
+	}
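+	// Percent-encode the remaining path. The second argument is assumed to
+	// control slash escaping: false keeps "/" separators unescaped.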
+	canonical_path = util.UriEncode(canonical_path, false)
+	return "/" + canonical_path
+}
+
+func getCanonicalQueryString(params map[string]string) string {
+	if len(params) == 0 {
+		return ""
+	}
+
+	result := make([]string, 0, len(params))
+	for k, v := range params {
+		if strings.ToLower(k) == strings.ToLower(http.AUTHORIZATION) {
+			continue
+		}
+		item := ""
+		if len(v) == 0 {
+			item = fmt.Sprintf("%s=", util.UriEncode(k, true))
+		} else {
+			item = fmt.Sprintf("%s=%s", util.UriEncode(k, true), util.UriEncode(v, true))
+		}
+		result = append(result, item)
+	}
+	sort.Strings(result)
+	return strings.Join(result, "&")
+}
+
+func getCanonicalHeaders(headers map[string]string,
+	headersToSign map[string]struct{}) (string, []string) {
+	canonicalHeaders := make([]string, 0, len(headers))
+	signHeaders := make([]string, 0, len(headersToSign))
+	for k, v := range headers {
+		headKey := strings.ToLower(k)
+		if headKey == strings.ToLower(http.AUTHORIZATION) {
+			continue
+		}
+		_, headExists := headersToSign[headKey]
+		if headExists ||
+			(strings.HasPrefix(headKey, http.BCE_PREFIX) &&
+				(headKey != http.BCE_REQUEST_ID)) {
+
+			headVal := strings.TrimSpace(v)
+			encoded := util.UriEncode(headKey, true) + ":" + util.UriEncode(headVal, true)
+			canonicalHeaders = append(canonicalHeaders, encoded)
+			signHeaders = append(signHeaders, headKey)
+		}
+	}
+	sort.Strings(canonicalHeaders)
+	sort.Strings(signHeaders)
+	return strings.Join(canonicalHeaders, SIGN_JOINER), signHeaders
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go b/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go
new file mode 100644
index 0000000..1b25504
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/builder.go
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// builder.go - defines the RequestBuilder structure for BCE services
+
+package bce
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// RequestBuilder holds config data for a bce request.
+// Some of the fields are required and the others are optional.
+// The builder pattern can simplify the execution of requests.
+type RequestBuilder struct {
+	client Client
+
+	url         string            // required
+	method      string            // required
+	queryParams map[string]string // optional
+	headers     map[string]string // optional
+	body        interface{}       // optional
+	result      interface{}       // optional
+}
+
+// NewRequestBuilder creates a RequestBuilder with the client.
+func NewRequestBuilder(client Client) *RequestBuilder {
+	return &RequestBuilder{
+		client: client,
+	}
+}
+
+func (b *RequestBuilder) WithURL(url string) *RequestBuilder {
+	b.url = url
+	return b
+}
+
+func (b *RequestBuilder) WithMethod(method string) *RequestBuilder {
+	b.method = method
+	return b
+}
+
+// set query param with the key/value directly.
+func (b *RequestBuilder) WithQueryParam(key, value string) *RequestBuilder {
+	if b.queryParams == nil {
+		b.queryParams = make(map[string]string)
+	}
+	b.queryParams[key] = value
+	return b
+}
+
+// set query param with the key/value only when the value is not blank.
+func (b *RequestBuilder) WithQueryParamFilter(key, value string) *RequestBuilder {
+	if len(value) == 0 {
+		return b
+	}
+	return b.WithQueryParam(key, value)
+}
+
+func (b *RequestBuilder) WithQueryParams(params map[string]string) *RequestBuilder {
+	if b.queryParams == nil {
+		b.queryParams = params
+	} else {
+		for key, value := range params {
+			b.queryParams[key] = value
+		}
+	}
+	return b
+}
+
+func (b *RequestBuilder) WithHeader(key, value string) *RequestBuilder {
+	if b.headers == nil {
+		b.headers = make(map[string]string)
+	}
+	b.headers[key] = value
+	return b
+}
+
+func (b *RequestBuilder) WithHeaders(headers map[string]string) *RequestBuilder {
+	if b.headers == nil {
+		b.headers = headers
+	} else {
+		for key, value := range headers {
+			b.headers[key] = value
+		}
+	}
+	return b
+}
+
+func (b *RequestBuilder) WithBody(body interface{}) *RequestBuilder {
+	b.body = body
+	return b
+}
+
+func (b *RequestBuilder) WithResult(result interface{}) *RequestBuilder {
+	b.result = result
+	return b
+}
+
+// Do will send the request to BCE and get the result with the builder's parameters.
+func (b *RequestBuilder) Do() error {
+	if err := b.validate(); err != nil {
+		return err
+	}
+
+	// build BceRequest
+	req, err := b.buildBceRequest()
+	if err != nil {
+		return err
+	}
+
+	// get result from BceResponse
+	if err := b.buildBceResponse(req); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Validate if the required fields are provided.
+func (b *RequestBuilder) validate() error {
+	if len(b.url) == 0 {
+		return fmt.Errorf("The url can't be null.")
+	}
+	if len(b.method) == 0 {
+		return fmt.Errorf("The method can't be null.")
+	}
+	if b.client == nil {
+		return fmt.Errorf("The client can't be null.")
+	}
+	return nil
+}
+
+func (b *RequestBuilder) buildBceRequest() (*BceRequest, error) {
+	// Build the request
+	req := &BceRequest{}
+	req.SetUri(b.url)
+	req.SetMethod(b.method)
+
+	if b.headers != nil {
+		req.SetHeaders(b.headers)
+	}
+	if b.queryParams != nil {
+		req.SetParams(b.queryParams)
+	}
+	if b.body != nil {
+		bodyBytes, err := json.Marshal(b.body)
+		if err != nil {
+			return nil, err
+		}
+		body, err := NewBodyFromBytes(bodyBytes)
+		if err != nil {
+			return nil, err
+		}
+		req.SetBody(body)
+	}
+
+	return req, nil
+}
+
+func (b *RequestBuilder) buildBceResponse(req *BceRequest) error {
+	// Send request and get response
+	resp := &BceResponse{}
+	if err := b.client.SendRequest(req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer resp.Body().Close()
+
+	if b.result == nil {
+		return nil
+	}
+
+	return resp.ParseJsonBody(b.result)
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go
new file mode 100644
index 0000000..1600920
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License.
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// client.go - definition of the BceClientConfiguration and BceClient structures
+
+// Package bce implements the infrastructure to access BCE services.
+//
+//  - BceClient:
+//      It is the general client of BCE to access all services. It builds the http request to access
+//      the services based on the given client configuration.
+//
+//  - BceClientConfiguration:
+//      The client configuration data structure which contains endpoint, region, credentials, retry
+//      policy, sign options and so on. It supports most of the default value and user can also
+//      access or change the default with its public fields' name.
+//
+//  - Error types:
+//      The error types when making a request or receiving a response from the BCE services contain
+//      two types: the BceClientError when making a request to the BCE services and the
+//      BceServiceError when receiving a response from them.
+//
+//  - BceRequest:
+//      The request instance stands for a request to access the BCE services.
+//
+//  - BceResponse:
+//      The response instance stands for a response from the BCE services.
+package bce
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+
+	"github.com/baidubce/bce-sdk-go/auth"
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+	"github.com/baidubce/bce-sdk-go/util/log"
+)
+
+// Client is the general interface which can perform sending requests. Different services
+// will define their own client in case of specific extensions.
+type Client interface {
+	SendRequest(*BceRequest, *BceResponse) error
+	SendRequestFromBytes(*BceRequest, *BceResponse, []byte) error
+	GetBceClientConfig() *BceClientConfiguration
+}
+
+// BceClient defines the general client to access the BCE services.
+type BceClient struct {
+	Config *BceClientConfiguration
+	Signer auth.Signer // the sign algorithm
+}
+
+// buildHttpRequest - the helper method for the client to build the http request
+//
+// PARAMS:
+//     - request: the input request object to be built
+func (c *BceClient) buildHttpRequest(request *BceRequest) {
+	// Construct the http request instance for the special fields
+	request.BuildHttpRequest()
+
+	// Set the client specific configurations
+	if request.Endpoint() == "" {
+		request.SetEndpoint(c.Config.Endpoint)
+	}
+	if request.Protocol() == "" {
+		request.SetProtocol(DEFAULT_PROTOCOL)
+	}
+	if len(c.Config.ProxyUrl) != 0 {
+		request.SetProxyUrl(c.Config.ProxyUrl)
+	}
+	request.SetTimeout(c.Config.ConnectionTimeoutInMillis / 1000)
+
+	// Set the BCE request headers
+	request.SetHeader(http.HOST, request.Host())
+	request.SetHeader(http.USER_AGENT, c.Config.UserAgent)
+	request.SetHeader(http.BCE_DATE, util.FormatISO8601Date(util.NowUTCSeconds()))
+
+	// Generate the auth string if needed
+	if c.Config.Credentials != nil {
+		c.Signer.Sign(&request.Request, c.Config.Credentials, c.Config.SignOption)
+	}
+}
+
+// SendRequest - the client sends the http request with the retry policy and receives the
+// response from the BCE services.
+//
+// PARAMS:
+//     - req: the request object to be sent to the BCE service
+//     - resp: the response object to receive the content from BCE service
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error {
+	// Return client error if it is not nil
+	if req.ClientError() != nil {
+		return req.ClientError()
+	}
+
+	// Build the http request and prepare to send
+	c.buildHttpRequest(req)
+	log.Infof("send http request: %v", req)
+
+	// Send request with the given retry policy
+	retries := 0
+	if req.Body() != nil {
+		defer req.Body().Close() // Manually close the ReadCloser body for retry
+	}
+	for {
+		// The request body must be saved temporarily so it can be replayed if the
+		// http request needs to be retried
+		var retryBuf bytes.Buffer
+		var teeReader io.Reader
+		if c.Config.Retry.ShouldRetry(nil, 0) && req.Body() != nil {
+			teeReader = io.TeeReader(req.Body(), &retryBuf)
+			req.Request.SetBody(ioutil.NopCloser(teeReader))
+		}
+		httpResp, err := http.Execute(&req.Request)
+
+		if err != nil {
+			if c.Config.Retry.ShouldRetry(err, retries) {
+				delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries)
+				time.Sleep(delay_in_mills)
+			} else {
+				return &BceClientError{
+					fmt.Sprintf("execute http request failed! Retried %d times, error: %v",
+						retries, err)}
+			}
+			retries++
+			log.Warnf("send request failed: %v, retry for %d time(s)", err, retries)
+			if req.Body() != nil {
+				ioutil.ReadAll(teeReader)
+				req.Request.SetBody(ioutil.NopCloser(&retryBuf))
+			}
+			continue
+		}
+		resp.SetHttpResponse(httpResp)
+		resp.ParseResponse()
+
+		log.Infof("receive http response: status: %s, debugId: %s, requestId: %s, elapsed: %v",
+			resp.StatusText(), resp.DebugId(), resp.RequestId(), resp.ElapsedTime())
+		for k, v := range resp.Headers() {
+			log.Debugf("%s=%s", k, v)
+		}
+		if resp.IsFail() {
+			err := resp.ServiceError()
+			if c.Config.Retry.ShouldRetry(err, retries) {
+				delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries)
+				time.Sleep(delay_in_mills)
+			} else {
+				return err
+			}
+			retries++
+			log.Warnf("send request failed, retry for %d time(s)", retries)
+			if req.Body() != nil {
+				ioutil.ReadAll(teeReader)
+				req.Request.SetBody(ioutil.NopCloser(&retryBuf))
+			}
+			continue
+		}
+		return nil
+	}
+}
+
+// SendRequestFromBytes - the client sends the http request with the retry policy and receives the
+// response from the BCE services.
+//
+// PARAMS:
+//     - req: the request object to be sent to the BCE service
+//     - resp: the response object to receive the content from BCE service
+//     - content: the content of body
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *BceClient) SendRequestFromBytes(req *BceRequest, resp *BceResponse, content []byte) error {
+	// Return client error if it is not nil
+	if req.ClientError() != nil {
+		return req.ClientError()
+	}
+	// Build the http request and prepare to send
+	c.buildHttpRequest(req)
+	log.Infof("send http request: %v", req)
+	// Send request with the given retry policy
+	retries := 0
+	for {
+		// The request body must be rebuilt on each attempt so it can be replayed
+		// if the http request needs to be retried
+		buf := bytes.NewBuffer(content)
+		req.Request.SetBody(ioutil.NopCloser(buf))
+		defer req.Request.Body().Close() // Manually close the ReadCloser body for retry
+		httpResp, err := http.Execute(&req.Request)
+		if err != nil {
+			if c.Config.Retry.ShouldRetry(err, retries) {
+				delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries)
+				time.Sleep(delay_in_mills)
+			} else {
+				return &BceClientError{
+					fmt.Sprintf("execute http request failed! Retried %d times, error: %v",
+						retries, err)}
+			}
+			retries++
+			log.Warnf("send request failed: %v, retry for %d time(s)", err, retries)
+			continue
+		}
+		resp.SetHttpResponse(httpResp)
+		resp.ParseResponse()
+		log.Infof("receive http response: status: %s, debugId: %s, requestId: %s, elapsed: %v",
+			resp.StatusText(), resp.DebugId(), resp.RequestId(), resp.ElapsedTime())
+		for k, v := range resp.Headers() {
+			log.Debugf("%s=%s", k, v)
+		}
+		if resp.IsFail() {
+			err := resp.ServiceError()
+			if c.Config.Retry.ShouldRetry(err, retries) {
+				delay_in_mills := c.Config.Retry.GetDelayBeforeNextRetryInMillis(err, retries)
+				time.Sleep(delay_in_mills)
+			} else {
+				return err
+			}
+			retries++
+			log.Warnf("send request failed, retry for %d time(s)", retries)
+			continue
+		}
+		return nil
+	}
+}
+
+func (c *BceClient) GetBceClientConfig() *BceClientConfiguration {
+	return c.Config
+}
+
+func NewBceClient(conf *BceClientConfiguration, sign auth.Signer) *BceClient {
+	clientConfig := http.ClientConfig{RedirectDisabled: conf.RedirectDisabled}
+	http.InitClient(clientConfig)
+	return &BceClient{conf, sign}
+}
+
+func NewBceClientWithAkSk(ak, sk, endPoint string) (*BceClient, error) {
+	credentials, err := auth.NewBceCredentials(ak, sk)
+	if err != nil {
+		return nil, err
+	}
+	defaultSignOptions := &auth.SignOptions{
+		HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN,
+		ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS}
+	defaultConf := &BceClientConfiguration{
+		Endpoint:                  endPoint,
+		Region:                    DEFAULT_REGION,
+		UserAgent:                 DEFAULT_USER_AGENT,
+		Credentials:               credentials,
+		SignOption:                defaultSignOptions,
+		Retry:                     DEFAULT_RETRY_POLICY,
+		ConnectionTimeoutInMillis: DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS,
+		RedirectDisabled:          false}
+	v1Signer := &auth.BceV1Signer{}
+
+	return NewBceClient(defaultConf, v1Signer), nil
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
new file mode 100644
index 0000000..638a0d4
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License.
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// config.go - define the client configuration for BCE
+
+package bce
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/baidubce/bce-sdk-go/auth"
+)
+
+// Constants and default values for the package bce
+const (
+	SDK_VERSION                          = "0.9.131"
+	URI_PREFIX                           = "/" // now support uri without prefix "v1" so just set root path
+	DEFAULT_DOMAIN                       = "baidubce.com"
+	DEFAULT_PROTOCOL                     = "http"
+	DEFAULT_REGION                       = "bj"
+	DEFAULT_CONTENT_TYPE                 = "application/json;charset=utf-8"
+	DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS = 1200 * 1000
+)
+
+var (
+	DEFAULT_USER_AGENT   string
+	DEFAULT_RETRY_POLICY = NewBackOffRetryPolicy(3, 20000, 300)
+)
+
+func init() {
+	DEFAULT_USER_AGENT = "bce-sdk-go"
+	DEFAULT_USER_AGENT += "/" + SDK_VERSION
+	DEFAULT_USER_AGENT += "/" + runtime.Version()
+	DEFAULT_USER_AGENT += "/" + runtime.GOOS
+	DEFAULT_USER_AGENT += "/" + runtime.GOARCH
+}
+
+// BceClientConfiguration defines the config components structure.
+type BceClientConfiguration struct {
+	Endpoint                  string
+	ProxyUrl                  string
+	Region                    string
+	UserAgent                 string
+	Credentials               *auth.BceCredentials
+	SignOption                *auth.SignOptions
+	Retry                     RetryPolicy
+	ConnectionTimeoutInMillis int
+	// CnameEnabled should be true when using a custom domain as the endpoint to visit BOS resources
+	CnameEnabled     bool
+	BackupEndpoint   string
+	RedirectDisabled bool
+}
+
+func (c *BceClientConfiguration) String() string {
+	return fmt.Sprintf(`BceClientConfiguration [
+        Endpoint=%s;
+        ProxyUrl=%s;
+        Region=%s;
+        UserAgent=%s;
+        Credentials=%v;
+        SignOption=%v;
+        RetryPolicy=%v;
+        ConnectionTimeoutInMillis=%v;
+        RedirectDisabled=%v
+    ]`, c.Endpoint, c.ProxyUrl, c.Region, c.UserAgent, c.Credentials,
+		c.SignOption, reflect.TypeOf(c.Retry).Name(), c.ConnectionTimeoutInMillis, c.RedirectDisabled)
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/error.go b/vendor/github.com/baidubce/bce-sdk-go/bce/error.go
new file mode 100644
index 0000000..e625651
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/error.go
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// error.go - define the error types for BCE
+
+package bce
+
+const (
+	EACCESS_DENIED            = "AccessDenied"
+	EINAPPROPRIATE_JSON       = "InappropriateJSON"
+	EINTERNAL_ERROR           = "InternalError"
+	EINVALID_ACCESS_KEY_ID    = "InvalidAccessKeyId"
+	EINVALID_HTTP_AUTH_HEADER = "InvalidHTTPAuthHeader"
+	EINVALID_HTTP_REQUEST     = "InvalidHTTPRequest"
+	EINVALID_URI              = "InvalidURI"
+	EMALFORMED_JSON           = "MalformedJSON"
+	EINVALID_VERSION          = "InvalidVersion"
+	EOPT_IN_REQUIRED          = "OptInRequired"
+	EPRECONDITION_FAILED      = "PreconditionFailed"
+	EREQUEST_EXPIRED          = "RequestExpired"
+	ESIGNATURE_DOES_NOT_MATCH = "SignatureDoesNotMatch"
+)
+
+// BceError abstracts the error for BCE
+type BceError interface {
+	error
+}
+
+// BceClientError defines the error struct for the client when making request
+type BceClientError struct{ Message string }
+
+func (b *BceClientError) Error() string { return b.Message }
+
+func NewBceClientError(msg string) *BceClientError { return &BceClientError{msg} }
+
+// BceServiceError defines the error struct for the BCE service when receiving response
+type BceServiceError struct {
+	Code       string
+	Message    string
+	RequestId  string
+	StatusCode int
+}
+
+func (b *BceServiceError) Error() string {
+	ret := "[Code: " + b.Code
+	ret += "; Message: " + b.Message
+	ret += "; RequestId: " + b.RequestId + "]"
+	return ret
+}
+
+func NewBceServiceError(code, msg, reqId string, status int) *BceServiceError {
+	return &BceServiceError{code, msg, reqId, status}
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/request.go b/vendor/github.com/baidubce/bce-sdk-go/bce/request.go
new file mode 100644
index 0000000..ca8409e
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/request.go
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// request.go - defines the BCE services request
+
+package bce
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// Body defines the data structure used in BCE request.
+// Every BCE request that sets the body field must set its content-length and content-md5 headers
+// to strictly ensure the correctness of the body content, and users can also set the content-sha256
+// header to strengthen the correctness with the "SetHeader" method.
+type Body struct {
+	stream     io.ReadCloser
+	size       int64
+	contentMD5 string
+}
+
+func (b *Body) Stream() io.ReadCloser { return b.stream }
+
+func (b *Body) SetStream(stream io.ReadCloser) { b.stream = stream }
+
+func (b *Body) Size() int64 { return b.size }
+
+func (b *Body) ContentMD5() string { return b.contentMD5 }
+
+// NewBodyFromBytes - build a Body object from the byte stream to be used in the http request, it
+// calculates the content-md5 of the byte stream and stores the size as well as the stream.
+//
+// PARAMS:
+//     - stream: byte stream
+// RETURNS:
+//     - *Body: the return Body object
+//     - error: error if any specific error occurs
+func NewBodyFromBytes(stream []byte) (*Body, error) {
+	buf := bytes.NewBuffer(stream)
+	size := int64(buf.Len())
+	contentMD5, err := util.CalculateContentMD5(buf, size)
+	if err != nil {
+		return nil, err
+	}
+	buf = bytes.NewBuffer(stream)
+	return &Body{ioutil.NopCloser(buf), size, contentMD5}, nil
+}
+
+// NewBodyFromString - build a Body object from the string to be used in the http request, it
+// calculates the content-md5 of the byte stream and stores the size as well as the stream.
+//
+// PARAMS:
+//     - str: the input string
+// RETURNS:
+//     - *Body: the return Body object
+//     - error: error if any specific error occurs
+func NewBodyFromString(str string) (*Body, error) {
+	buf := bytes.NewBufferString(str)
+	size := int64(len(str))
+	contentMD5, err := util.CalculateContentMD5(buf, size)
+	if err != nil {
+		return nil, err
+	}
+	buf = bytes.NewBufferString(str)
+	return &Body{ioutil.NopCloser(buf), size, contentMD5}, nil
+}
+
+// NewBodyFromFile - build a Body object from the given file name to be used in the http request,
+// it calculates the content-md5 of the byte stream and stores the size as well as the stream.
+//
+// PARAMS:
+//     - fname: the given file name
+// RETURNS:
+//     - *Body: the return Body object
+//     - error: error if any specific error occurs
+func NewBodyFromFile(fname string) (*Body, error) {
+	file, err := os.Open(fname)
+	if err != nil {
+		return nil, err
+	}
+	fileInfo, infoErr := file.Stat()
+	if infoErr != nil {
+		return nil, infoErr
+	}
+	contentMD5, md5Err := util.CalculateContentMD5(file, fileInfo.Size())
+	if md5Err != nil {
+		return nil, md5Err
+	}
+	if _, err = file.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	return &Body{file, fileInfo.Size(), contentMD5}, nil
+}
+
+// NewBodyFromSectionFile - build a Body object from the given file pointer with offset and size.
+// It calculates the content-md5 of the given content and stores the size as well as the stream.
+//
+// PARAMS:
+//     - file: the input file pointer
+//     - off: offset of current section body
+//     - size: current section body size
+// RETURNS:
+//     - *Body: the return Body object
+//     - error: error if any specific error occurs
+func NewBodyFromSectionFile(file *os.File, off, size int64) (*Body, error) {
+	if _, err := file.Seek(off, 0); err != nil {
+		return nil, err
+	}
+	contentMD5, md5Err := util.CalculateContentMD5(file, size)
+	if md5Err != nil {
+		return nil, md5Err
+	}
+	if _, err := file.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	section := io.NewSectionReader(file, off, size)
+	return &Body{ioutil.NopCloser(section), size, contentMD5}, nil
+}
+
+// NewBodyFromSizedReader - build a Body object from the given reader with size.
+// It calculates the content-md5 of the given content and stores the size as well as the stream.
+//
+// PARAMS:
+//     - r: the input reader
+//     - size: the size to be read, -1 is read all
+// RETURNS:
+//     - *Body: the return Body object
+//     - error: error if any specific error occurs
func NewBodyFromSizedReader(r io.Reader, size int64) (*Body, error) {
+	var buffer bytes.Buffer
+	var rlen int64
+	var err error
+	if size >= 0 {
+		rlen, err = io.CopyN(&buffer, r, size)
+	} else {
+		rlen, err = io.Copy(&buffer, r)
+	}
+	if err != nil {
+		return nil, err
+	}
+	if rlen != int64(buffer.Len()) { // must be equal
+		return nil, NewBceClientError("unexpected reader")
+	}
+	if size >= 0 {
+		if rlen < size {
+			return nil, NewBceClientError("size is greater than reader actual size")
+		}
+	}
+	contentMD5, err := util.CalculateContentMD5(bytes.NewBuffer(buffer.Bytes()), rlen)
+	if err != nil {
+		return nil, err
+	}
+	body := &Body{
+		stream:     ioutil.NopCloser(&buffer),
+		size:       rlen,
+		contentMD5: contentMD5,
+	}
+	return body, nil
+}
+
+// BceRequest defines the request structure for accessing BCE services
+type BceRequest struct {
+	http.Request
+	requestId   string
+	clientError *BceClientError
+}
+
+func (b *BceRequest) RequestId() string { return b.requestId }
+
+func (b *BceRequest) SetRequestId(val string) { b.requestId = val }
+
+func (b *BceRequest) ClientError() *BceClientError { return b.clientError }
+
+func (b *BceRequest) SetClientError(err *BceClientError) { b.clientError = err }
+
+func (b *BceRequest) SetBody(body *Body) { // override SetBody derived from http.Request
+	b.Request.SetBody(body.Stream())
+	b.SetLength(body.Size()) // set field of "net/http.Request.ContentLength"
+	if body.Size() > 0 {
+		b.SetHeader(http.CONTENT_MD5, body.ContentMD5())
+		b.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", body.Size()))
+	}
+}
+
+func (b *BceRequest) BuildHttpRequest() {
+	// Only need to build the specific `requestId` field for BCE; other fields are the same as
+	// in `http.Request` as well as its methods.
+	if len(b.requestId) == 0 {
+		// Construct the request ID with UUID
+		b.requestId = util.NewRequestId()
+	}
+	b.SetHeader(http.BCE_REQUEST_ID, b.requestId)
+}
+
+func (b *BceRequest) String() string {
+	requestIdStr := "requestId=" + b.requestId
+	if b.clientError != nil {
+		return requestIdStr + ", client error: " + b.ClientError().Error()
+	}
+	return requestIdStr + "\n" + b.Request.String()
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/response.go b/vendor/github.com/baidubce/bce-sdk-go/bce/response.go
new file mode 100644
index 0000000..94f2ae4
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/response.go
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// response.go - defines the common BCE services response
+
+package bce
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/baidubce/bce-sdk-go/http"
+)
+
+// BceResponse defines the response structure for receiving BCE services response.
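+//
+// A typical flow (a sketch based on the methods below): call SetHttpResponse
+// with the raw http response, then ParseResponse, and check IsFail and
+// ServiceError before reading Body or calling ParseJsonBody.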
+type BceResponse struct {
+	statusCode   int
+	statusText   string
+	requestId    string
+	debugId      string
+	response     *http.Response
+	serviceError *BceServiceError
+}
+
+func (r *BceResponse) IsFail() bool {
+	return r.response.StatusCode() >= 400
+}
+
+func (r *BceResponse) StatusCode() int {
+	return r.statusCode
+}
+
+func (r *BceResponse) StatusText() string {
+	return r.statusText
+}
+
+func (r *BceResponse) RequestId() string {
+	return r.requestId
+}
+
+func (r *BceResponse) DebugId() string {
+	return r.debugId
+}
+
+func (r *BceResponse) Header(key string) string {
+	return r.response.GetHeader(key)
+}
+
+func (r *BceResponse) Headers() map[string]string {
+	return r.response.GetHeaders()
+}
+
+func (r *BceResponse) Body() io.ReadCloser {
+	return r.response.Body()
+}
+
+func (r *BceResponse) SetHttpResponse(response *http.Response) {
+	r.response = response
+}
+
+func (r *BceResponse) ElapsedTime() time.Duration {
+	return r.response.ElapsedTime()
+}
+
+func (r *BceResponse) ServiceError() *BceServiceError {
+	return r.serviceError
+}
+
+func (r *BceResponse) ParseResponse() {
+	r.statusCode = r.response.StatusCode()
+	r.statusText = r.response.StatusText()
+	r.requestId = r.response.GetHeader(http.BCE_REQUEST_ID)
+	r.debugId = r.response.GetHeader(http.BCE_DEBUG_ID)
+	if r.IsFail() {
+		r.serviceError = NewBceServiceError("", r.statusText, r.requestId, r.statusCode)
+
+		// First try to read the error `Code' and `Message' from the body
+		rawBody, _ := ioutil.ReadAll(r.Body())
+		defer r.Body().Close()
+		if len(rawBody) != 0 {
+			jsonDecoder := json.NewDecoder(bytes.NewBuffer(rawBody))
+			if err := jsonDecoder.Decode(r.serviceError); err != nil {
+				r.serviceError = NewBceServiceError(
+					EMALFORMED_JSON,
+					"Service json error message decode failed",
+					r.requestId,
+					r.statusCode)
+			}
+			return
+		}
+
+		// Otherwise, guess the error `Code' from the returned status code
+		switch r.statusCode {
+		case 400:
+			r.serviceError.Code = EINVALID_HTTP_REQUEST
+		case 403:
+			r.serviceError.Code = EACCESS_DENIED
+		case 412:
+			r.serviceError.Code = EPRECONDITION_FAILED
+		case 500:
+			r.serviceError.Code = EINTERNAL_ERROR
+		default:
+			words := strings.Split(r.statusText, " ")
+			r.serviceError.Code = strings.Join(words[1:], "")
+		}
+	}
+}
+
+func (r *BceResponse) ParseJsonBody(result interface{}) error {
+	defer r.Body().Close()
+	jsonDecoder := json.NewDecoder(r.Body())
+	return jsonDecoder.Decode(result)
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go b/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go
new file mode 100644
index 0000000..19675a3
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/retry.go
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// retry.go - defines the retry policy used when making requests to BCE services
+
+package bce
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/baidubce/bce-sdk-go/util/log"
+)
+
+// RetryPolicy defines the two methods used to decide whether and when to retry a request.
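+//
+// A sketch of how a caller might drive a RetryPolicy; the `doRequest` helper is
+// hypothetical and only stands for a single attempt that returns a BceError:
+//
+//	policy := NewBackOffRetryPolicy(3, 20000, 300)
+//	for attempt := 0; ; attempt++ {
+//		err := doRequest()
+//		if err == nil || !policy.ShouldRetry(err, attempt) {
+//			return err
+//		}
+//		time.Sleep(policy.GetDelayBeforeNextRetryInMillis(err, attempt))
+//	}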
+type RetryPolicy interface {
+	ShouldRetry(BceError, int) bool
+	GetDelayBeforeNextRetryInMillis(BceError, int) time.Duration
+}
+
+// NoRetryPolicy just does not retry.
+type NoRetryPolicy struct{}
+
+func (_ *NoRetryPolicy) ShouldRetry(err BceError, attempts int) bool {
+	return false
+}
+
+func (_ *NoRetryPolicy) GetDelayBeforeNextRetryInMillis(
+	err BceError, attempts int) time.Duration {
+	return 0 * time.Millisecond
+}
+
+func NewNoRetryPolicy() *NoRetryPolicy {
+	return &NoRetryPolicy{}
+}
+
+// BackOffRetryPolicy implements a policy that retries with an exponential back-off strategy.
+// This policy will keep retrying until the maximum number of retries is reached. The delay time
+// is a fixed interval for the first retry, then 2 * interval for the second, 4 * interval for
+// the third, and so on.
+// In general, the delay time is 2^number_of_retries_attempted * interval. When a maximum
+// delay time is specified, the delay time will never exceed this limit.
+type BackOffRetryPolicy struct {
+	maxErrorRetry        int
+	maxDelayInMillis     int64
+	baseIntervalInMillis int64
+}
+
+func (b *BackOffRetryPolicy) ShouldRetry(err BceError, attempts int) bool {
+	// Do not retry any more once the maximum number of retries is reached
+	if attempts >= b.maxErrorRetry {
+		return false
+	}
+
+	if err == nil {
+		return true
+	}
+
+	// Always retry on IO error
+	if _, ok := err.(net.Error); ok {
+		return true
+	}
+
+	// Only retry on a service error
+	if realErr, ok := err.(*BceServiceError); ok {
+		switch realErr.StatusCode {
+		case http.StatusInternalServerError:
+			log.Warn("retry for internal server error(500)")
+			return true
+		case http.StatusBadGateway:
+			log.Warn("retry for bad gateway(502)")
+			return true
+		case http.StatusServiceUnavailable:
+			log.Warn("retry for service unavailable(503)")
+			return true
+		case http.StatusBadRequest:
+			if realErr.Code != "Http400" {
+				return false
+			}
+			log.Warn("retry for bad request(400)")
+			return true
+		}
+
+		if realErr.Code == EREQUEST_EXPIRED {
+			log.Warn("retry for request expired")
+			return true
+		}
+	}
+	return false
+}
+
+func (b *BackOffRetryPolicy) GetDelayBeforeNextRetryInMillis(
+	err BceError, attempts int) time.Duration {
+	if attempts < 0 {
+		return 0 * time.Millisecond
+	}
+	delayInMillis := (1 << uint64(attempts)) * b.baseIntervalInMillis
+	if delayInMillis > b.maxDelayInMillis {
+		return time.Duration(b.maxDelayInMillis) * time.Millisecond
+	}
+	return time.Duration(delayInMillis) * time.Millisecond
+}
+
+func NewBackOffRetryPolicy(maxRetry int, maxDelay, base int64) *BackOffRetryPolicy {
+	return &BackOffRetryPolicy{maxRetry, maxDelay, base}
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/client.go b/vendor/github.com/baidubce/bce-sdk-go/http/client.go
new file mode 100644
index 0000000..b036a40
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/http/client.go
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// client.go - defines the Execute function used to send the http request and get the response
+
+// Package http defines the structures of the request and response used to access the BCE
+// services, as well as the http constant headers and methods. It finally implements the
+// `Execute` function to do the work.
+package http
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+)
+
+const (
+	defaultMaxIdleConnsPerHost   = 500
+	defaultResponseHeaderTimeout = 60 * time.Second
+	defaultDialTimeout           = 30 * time.Second
+	defaultSmallInterval         = 600 * time.Second
+	defaultLargeInterval         = 1200 * time.Second
+)
+
+// The httpClient is a global variable kept for reuse when sending requests and getting
+// responses; the Client provided by the Go standard library is thread safe.
+var (
+	httpClient *http.Client
+	transport  *http.Transport
+)
+
+type timeoutConn struct {
+	conn          net.Conn
+	smallInterval time.Duration
+	largeInterval time.Duration
+}
+
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+	c.SetReadDeadline(time.Now().Add(c.smallInterval))
+	n, err = c.conn.Read(b)
+	c.SetReadDeadline(time.Now().Add(c.largeInterval))
+	return n, err
+}
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+	c.SetWriteDeadline(time.Now().Add(c.smallInterval))
+	n, err = c.conn.Write(b)
+	c.SetWriteDeadline(time.Now().Add(c.largeInterval))
+	return n, err
+}
+func (c *timeoutConn) Close() error                       { return c.conn.Close() }
+func (c *timeoutConn) LocalAddr() net.Addr                { return c.conn.LocalAddr() }
+func (c *timeoutConn) RemoteAddr() net.Addr               { return c.conn.RemoteAddr() }
+func (c *timeoutConn) SetDeadline(t time.Time) error      { return c.conn.SetDeadline(t) }
+func (c *timeoutConn) SetReadDeadline(t time.Time) error  { return c.conn.SetReadDeadline(t) }
+func (c *timeoutConn) SetWriteDeadline(t time.Time) error { return c.conn.SetWriteDeadline(t) }
+
+type ClientConfig struct {
+	RedirectDisabled bool
+}
+
+var customizeInit sync.Once
+
+func InitClient(config ClientConfig) {
+	customizeInit.Do(func() {
+		httpClient = &http.Client{}
+		transport = &http.Transport{
+			MaxIdleConnsPerHost:   defaultMaxIdleConnsPerHost,
+			ResponseHeaderTimeout: defaultResponseHeaderTimeout,
+			Dial: func(network, address string) (net.Conn, error) {
+				conn, err := net.DialTimeout(network, address, defaultDialTimeout)
+				if err != nil {
+					return nil, err
+				}
+				tc := &timeoutConn{conn, defaultSmallInterval, defaultLargeInterval}
+				tc.SetReadDeadline(time.Now().Add(defaultLargeInterval))
+				return tc, nil
+			},
+		}
+		httpClient.Transport = transport
+		if config.RedirectDisabled {
+			httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+				return http.ErrUseLastResponse
+			}
+		}
+	})
+}
+
+// Execute - perform the http request and get the response
+//
+// PARAMS:
+//     - request: the http request instance to be sent
+// RETURNS:
+//     - response: the http response returned from the server
+//     - error: nil if ok otherwise the specific error
+func Execute(request *Request) (*Response, error) {
+	// Build the request object for the current request
+	httpRequest := &http.Request{
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+	}
+
+	// Set the connection timeout for the current request
+	httpClient.Timeout = time.Duration(request.Timeout()) * time.Second
+
+	// Set the request method
+	httpRequest.Method = request.Method()
+
+	// Set the request url
+	internalUrl := &url.URL{
+		Scheme:   request.Protocol(),
+		Host:     request.Host(),
+		Path:     request.Uri(),
+		RawQuery: request.QueryString()}
+	httpRequest.URL =
	internalUrl
+
+	// Set the request headers
+	internalHeader := make(http.Header)
+	for k, v := range request.Headers() {
+		val := make([]string, 0, 1)
+		val = append(val, v)
+		internalHeader[k] = val
+	}
+	httpRequest.Header = internalHeader
+
+	if request.Body() != nil {
+		if request.Length() > 0 {
+			httpRequest.ContentLength = request.Length()
+			httpRequest.Body = request.Body()
+		} else if request.Length() < 0 {
+			// if the body is set and ContentLength < 0, the request will be chunked
+			httpRequest.Body = request.Body()
+		} // else: leave httpRequest.Body nil when ContentLength == 0
+	}
+
+	// Set the proxy setting if needed
+	if len(request.ProxyUrl()) != 0 {
+		transport.Proxy = func(_ *http.Request) (*url.URL, error) {
+			return url.Parse(request.ProxyUrl())
+		}
+	}
+
+	// Perform the http request and get the response. The keep-alive connections need to be
+	// closed explicitly when an error occurs, since such a request may otherwise continue
+	// sending its data on a stale connection.
+	start := time.Now()
+
+	httpResponse, err := httpClient.Do(httpRequest)
+
+	end := time.Now()
+	if err != nil {
+		transport.CloseIdleConnections()
+		return nil, err
+	}
+	if httpResponse.StatusCode >= 400 &&
+		(httpRequest.Method == PUT || httpRequest.Method == POST) {
+		transport.CloseIdleConnections()
+	}
+	response := &Response{httpResponse, end.Sub(start)}
+	return response, nil
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/constants.go b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go
new file mode 100644
index 0000000..69c5e42
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// constants.go - defines constants of the BCE http package including headers and methods
+
+package http
+
+// Constants of the supported HTTP methods for BCE
+const (
+	GET     = "GET"
+	PUT     = "PUT"
+	POST    = "POST"
+	DELETE  = "DELETE"
+	HEAD    = "HEAD"
+	OPTIONS = "OPTIONS"
+	PATCH   = "PATCH"
+)
+
+// Constants of the HTTP headers for BCE
+const (
+	// Standard HTTP Headers
+	AUTHORIZATION       = "Authorization"
+	CACHE_CONTROL       = "Cache-Control"
+	CONTENT_DISPOSITION = "Content-Disposition"
+	CONTENT_ENCODING    = "Content-Encoding"
+	CONTENT_LANGUAGE    = "Content-Language"
+	CONTENT_LENGTH      = "Content-Length"
+	CONTENT_MD5         = "Content-Md5"
+	CONTENT_RANGE       = "Content-Range"
+	CONTENT_TYPE        = "Content-Type"
+	DATE                = "Date"
+	ETAG                = "Etag"
+	EXPIRES             = "Expires"
+	HOST                = "Host"
+	LAST_MODIFIED       = "Last-Modified"
+	LOCATION            = "Location"
+	RANGE               = "Range"
+	SERVER              = "Server"
+	TRANSFER_ENCODING   = "Transfer-Encoding"
+	USER_AGENT          = "User-Agent"
+
+	// BCE Common HTTP Headers
+	BCE_PREFIX               = "x-bce-"
+	BCE_ACL                  = "x-bce-acl"
+	BCE_GRANT_READ           = "x-bce-grant-read"
+	BCE_GRANT_FULL_CONTROL   = "x-bce-grant-full-control"
+	BCE_CONTENT_SHA256       = "x-bce-content-sha256"
+	BCE_CONTENT_CRC32        = "x-bce-content-crc32"
+	BCE_REQUEST_ID           = "x-bce-request-id"
+	BCE_USER_METADATA_PREFIX = "x-bce-meta-"
+	BCE_SECURITY_TOKEN       = "x-bce-security-token"
+	BCE_DATE                 = "x-bce-date"
+
+	// BOS HTTP Headers
+	BCE_COPY_METADATA_DIRECTIVE         = "x-bce-metadata-directive"
+	BCE_COPY_SOURCE                     = "x-bce-copy-source"
+	BCE_COPY_SOURCE_IF_MATCH            = "x-bce-copy-source-if-match"
+	BCE_COPY_SOURCE_IF_MODIFIED_SINCE   = "x-bce-copy-source-if-modified-since"
+	BCE_COPY_SOURCE_IF_NONE_MATCH       = "x-bce-copy-source-if-none-match"
+	BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE = "x-bce-copy-source-if-unmodified-since"
+	BCE_COPY_SOURCE_RANGE               = "x-bce-copy-source-range"
+	BCE_DEBUG_ID                        = "x-bce-debug-id"
+	BCE_OBJECT_TYPE                     = "x-bce-object-type"
+	BCE_NEXT_APPEND_OFFSET              = "x-bce-next-append-offset"
+	BCE_STORAGE_CLASS                   = "x-bce-storage-class"
+	BCE_PROCESS                         = "x-bce-process"
+	BCE_RESTORE_TIER                    = "x-bce-restore-tier"
+	BCE_RESTORE_DAYS                    = "x-bce-restore-days"
+	BCE_RESTORE                         = "x-bce-restore"
+	BCE_FORBID_OVERWRITE                = "x-bce-forbid-overwrite"
+	BCE_SYMLINK_TARGET                  = "x-bce-symlink-target"
+)
diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/request.go b/vendor/github.com/baidubce/bce-sdk-go/http/request.go
new file mode 100644
index 0000000..7fecfb9
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/http/request.go
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// request.go - the custom HTTP request for BCE
+
+package http
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// Request stands for the general http request structure used to make requests to the BCE services.
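+//
+// An illustrative sketch of building a Request by hand (endpoint and header
+// values are examples only):
+//
+//	req := &Request{}
+//	req.SetEndpoint("https://bj.bcebos.com") // falls back to http when no scheme is given
+//	req.SetMethod(GET)
+//	req.SetUri("/example-bucket")
+//	req.SetHeader(USER_AGENT, "demo-agent")
+//	url := req.GenerateUrl(false)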
+type Request struct { + protocol string + host string + port int + method string + uri string + proxyUrl string + timeout int + headers map[string]string + params map[string]string + + // Optional body and length fields to set the body stream and content length + body io.ReadCloser + length int64 +} + +func (r *Request) Protocol() string { + return r.protocol +} + +func (r *Request) SetProtocol(protocol string) { + r.protocol = protocol +} + +func (r *Request) Endpoint() string { + if r.host == "" { + return "" + } + return r.protocol + "://" + r.host +} + +func (r *Request) SetEndpoint(endpoint string) { + pos := strings.Index(endpoint, "://") + rest := endpoint + if pos != -1 { + r.protocol = endpoint[0:pos] + rest = endpoint[pos+3:] + } else { + r.protocol = "http" + } + + r.SetHost(rest) +} + +func (r *Request) Host() string { + return r.host +} + +func (r *Request) SetHost(host string) { + r.host = host + pos := strings.Index(host, ":") + if pos != -1 { + p, e := strconv.Atoi(host[pos+1:]) + if e == nil { + r.port = p + } + } + + if r.port == 0 { + if r.protocol == "http" { + r.port = 80 + } else if r.protocol == "https" { + r.port = 443 + } + } +} + +func (r *Request) Port() int { + return r.port +} + +func (r *Request) SetPort(port int) { + // Port can be set by the endpoint or host, this method is rarely used. + r.port = port +} + +func (r *Request) Headers() map[string]string { + return r.headers +} + +func (r *Request) SetHeaders(headers map[string]string) { + r.headers = headers +} + +func (r *Request) Header(key string) string { + if v, ok := r.headers[key]; ok { + return v + } + return "" +} + +func (r *Request) SetHeader(key, value string) { + if r.headers == nil { + r.headers = make(map[string]string) + } + r.headers[key] = value +} + +func (r *Request) Params() map[string]string { + return r.params +} + +func (r *Request) SetParams(params map[string]string) { + r.params = params +} + +func (r *Request) Param(key string) string { + if v, ok := r.params[key]; ok { + return v + } + return "" +} + +func (r *Request) SetParam(key, value string) { + if r.params == nil { + r.params = make(map[string]string) + } + r.params[key] = value +} + +func (r *Request) QueryString() string { + buf := make([]string, 0, len(r.params)) + for k, v := range r.params { + buf = append(buf, util.UriEncode(k, true)+"="+util.UriEncode(v, true)) + } + return strings.Join(buf, "&") +} + +func (r *Request) Method() string { + return r.method +} + +func (r *Request) SetMethod(method string) { + r.method = method +} + +func (r *Request) Uri() string { + return r.uri +} + +func (r *Request) SetUri(uri string) { + r.uri = uri +} + +func (r *Request) ProxyUrl() string { + return r.proxyUrl +} + +func (r *Request) SetProxyUrl(url string) { + r.proxyUrl = url +} + +func (r *Request) Timeout() int { + return r.timeout +} + +func (r *Request) SetTimeout(timeout int) { + r.timeout = timeout +} + +func (r *Request) Body() io.ReadCloser { + return r.body +} + +func (r *Request) SetBody(stream io.ReadCloser) { + r.body = stream +} + +func (r *Request) Length() int64 { + return r.length +} + +func (r *Request) SetLength(l int64) { + r.length = l +} + +func (r *Request) GenerateUrl(addPort bool) string { + if addPort { + return fmt.Sprintf("%s://%s:%d%s?%s", + r.protocol, r.host, r.port, r.uri, r.QueryString()) + } else { + return fmt.Sprintf("%s://%s%s?%s", r.protocol, r.host, r.uri, r.QueryString()) + } +} + +func (r *Request) String() string { + header := make([]string, 0, len(r.headers)) + for k, v := range 
r.headers {
+		header = append(header, "\t"+k+"="+v)
+	}
+	return fmt.Sprintf("\t%s %s\n%v",
+		r.method, r.GenerateUrl(false), strings.Join(header, "\n"))
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/response.go b/vendor/github.com/baidubce/bce-sdk-go/http/response.go
new file mode 100644
index 0000000..98632fc
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/http/response.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// response.go - the custom HTTP response for BCE
+
+package http
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Response defines the general http response structure for accessing the BCE services.
+type Response struct {
+	httpResponse *http.Response // the standard library http.Response object
+	elapsedTime  time.Duration  // elapsed time from sending the request to receiving the response
+}
+
+func (r *Response) StatusText() string {
+	return r.httpResponse.Status
+}
+
+func (r *Response) StatusCode() int {
+	return r.httpResponse.StatusCode
+}
+
+func (r *Response) Protocol() string {
+	return r.httpResponse.Proto
+}
+
+func (r *Response) HttpResponse() *http.Response {
+	return r.httpResponse
+}
+
+func (r *Response) SetHttpResponse(response *http.Response) {
+	r.httpResponse = response
+}
+
+func (r *Response) ElapsedTime() time.Duration {
+	return r.elapsedTime
+}
+
+func (r *Response) GetHeader(name string) string {
+	return r.httpResponse.Header.Get(name)
+}
+
+func (r *Response) GetHeaders() map[string]string {
+	header := r.httpResponse.Header
+	ret := make(map[string]string, len(header))
+	for k, v := range header {
+		ret[k] = v[0]
+	}
+	return ret
+}
+
+func (r *Response) ContentLength() int64 {
+	return r.httpResponse.ContentLength
+}
+
+func (r *Response) Body() io.ReadCloser {
+	return r.httpResponse.Body
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go
new file mode 100644
index 0000000..177467d
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go
@@ -0,0 +1,1090 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// bucket.go - the bucket APIs definition supported by the BOS service
+
+// Package api defines all APIs supported by the BOS service of BCE.
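+//
+// A minimal calling sketch (`cli` is assumed to be an initialized bce.Client;
+// constructing one is outside the scope of this package):
+//
+//	res, err := ListBuckets(cli)
+//	if err != nil {
+//		return err
+//	}
+//	for _, b := range res.Buckets {
+//		fmt.Println(b.Name, b.Location)
+//	}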
+package api
+
+import (
+	"encoding/json"
+	"strconv"
+
+	"github.com/baidubce/bce-sdk-go/bce"
+	"github.com/baidubce/bce-sdk-go/http"
+)
+
+// ListBuckets - list all buckets of the account
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+// RETURNS:
+//     - *ListBucketsResult: the result bucket list structure
+//     - error: nil if ok otherwise the specific error
+func ListBuckets(cli bce.Client) (*ListBucketsResult, error) {
+	req := &bce.BceRequest{}
+	req.SetMethod(http.GET)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &ListBucketsResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ListObjects - list all objects of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - args: the optional arguments to list objects
+// RETURNS:
+//     - *ListObjectsResult: the result object list structure
+//     - error: nil if ok otherwise the specific error
+func ListObjects(cli bce.Client, bucket string,
+	args *ListObjectsArgs) (*ListObjectsResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+
+	// Optional arguments settings
+	if args != nil {
+		if len(args.Delimiter) != 0 {
+			req.SetParam("delimiter", args.Delimiter)
+		}
+		if len(args.Marker) != 0 {
+			req.SetParam("marker", args.Marker)
+		}
+		if args.MaxKeys != 0 {
+			req.SetParam("maxKeys", strconv.Itoa(args.MaxKeys))
+		}
+		if len(args.Prefix) != 0 {
+			req.SetParam("prefix", args.Prefix)
+		}
+	}
+	if args == nil || args.MaxKeys == 0 {
+		req.SetParam("maxKeys", "1000")
+	}
+
+	// Send the request and get result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &ListObjectsResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// HeadBucket - test whether the given bucket exists and whether the caller has access authority
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if the bucket exists and is accessible, otherwise the specific error
+func HeadBucket(cli bce.Client, bucket string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.HEAD)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// PutBucket - create a new bucket with the given name
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the new bucket name
+// RETURNS:
+//     - string: the location of the new bucket if create success
+//     - error: nil if create success otherwise the specific error
+func PutBucket(cli bce.Client, bucket string) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return resp.Header(http.LOCATION), nil
+}
+
+// DeleteBucket - delete an empty bucket by the given name
+//
+// PARAMS:
+//     - cli: the client agent which
can perform sending request
+//     - bucket: the bucket name to be deleted
+// RETURNS:
+//     - error: nil if delete success otherwise the specific error
+func DeleteBucket(cli bce.Client, bucket string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.DELETE)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketLocation - get the location of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - string: the location of the bucket
+//     - error: nil if success otherwise the specific error
+func GetBucketLocation(cli bce.Client, bucket string) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+	req.SetParam("location", "")
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	}
+	result := &LocationType{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return "", err
+	}
+	return result.LocationConstraint, nil
+}
+
+// PutBucketAcl - set the acl of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - cannedAcl: supports private, public-read and public-read-write
+//     - aclBody: the acl file body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	req.SetParam("acl", "")
+
+	// The acl setting
+	if len(cannedAcl) != 0 && aclBody != nil {
+		return bce.NewBceClientError("BOS does not support cannedAcl and acl file at the same time")
+	}
+	if validCannedAcl(cannedAcl) {
+		req.SetHeader(http.BCE_ACL, cannedAcl)
+	}
+	if aclBody != nil {
+		req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE)
+		req.SetBody(aclBody)
+	}
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketAcl - get the acl of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - *GetBucketAclResult: the result of the bucket acl
+//     - error: nil if success otherwise the specific error
+func GetBucketAcl(cli bce.Client, bucket string) (*GetBucketAclResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+	req.SetParam("acl", "")
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &GetBucketAclResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// PutBucketLogging - set the logging prefix of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - logging: the logging prefix json string body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketLogging(cli bce.Client, bucket string, logging *bce.Body)
error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("logging", "") + req.SetBody(logging) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketLogging - get the logging config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - *GetBucketLoggingResult: the logging setting of the bucket +// - error: nil if success otherwise the specific error +func GetBucketLogging(cli bce.Client, bucket string) (*GetBucketLoggingResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("logging", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketLoggingResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketLogging - delete the logging setting of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketLogging(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("logging", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketLifecycle - set the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - lifecycle: the lifecycle rule json string body +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketLifecycle(cli bce.Client, bucket string, lifecycle *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("lifecycle", "") + req.SetBody(lifecycle) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketLifecycle - get the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - *GetBucketLifecycleResult: the lifecycle rule of the bucket +// - error: nil if success otherwise the specific error +func GetBucketLifecycle(cli bce.Client, bucket string) (*GetBucketLifecycleResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("lifecycle", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketLifecycleResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketLifecycle - delete the lifecycle rule of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending 
request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketLifecycle(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("lifecycle", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketStorageclass - set the storage class of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - storageClass: the storage class string +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketStorageclass(cli bce.Client, bucket, storageClass string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("storageClass", "") + + obj := &StorageClassType{storageClass} + jsonBytes, jsonErr := json.Marshal(obj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + req.SetBody(body) + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketStorageclass - get the storage class of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - string: the storage class of the bucket +// - error: nil if success otherwise the specific error +func GetBucketStorageclass(cli bce.Client, bucket string) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("storageClass", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + result := &StorageClassType{} + if err := resp.ParseJsonBody(result); err != nil { + return "", err + } + return result.StorageClass, nil +} + +// PutBucketReplication - set the bucket replication of different region +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationConf: the replication config body stream +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Body, replicationRuleId string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + if replicationConf != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(replicationConf) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketReplication - get the bucket replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed 
of [0-9 A-Z a-z _ -] +// RETURNS: +// - *GetBucketReplicationResult: the result of the bucket replication config +// - error: nil if success otherwise the specific error +func GetBucketReplication(cli bce.Client, bucket string, replicationRuleId string) (*GetBucketReplicationResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketReplicationResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// ListBucketReplication - list all replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func ListBucketReplication(cli bce.Client, bucket string) (*ListBucketReplicationResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replication", "") + req.SetParam("list", "") + resp := &bce.BceResponse{} + if err := cli.SendRequest(req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &ListBucketReplicationResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketReplication - delete the bucket replication config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketReplication(cli bce.Client, bucket string, replicationRuleId string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("replication", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketReplicationProgress - get the bucket replication process of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - *GetBucketReplicationProgressResult: the result of the bucket replication process +// - error: nil if success otherwise the specific error +func GetBucketReplicationProgress(cli bce.Client, bucket string, replicationRuleId string) ( + *GetBucketReplicationProgressResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("replicationProgress", "") + if len(replicationRuleId) > 0 { + req.SetParam("id", replicationRuleId) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketReplicationProgressResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return 
result, nil
+}
+
+// PutBucketEncryption - set the bucket encryption config
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - algorithm: the encryption algorithm
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketEncryption(cli bce.Client, bucket, algorithm string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	req.SetParam("encryption", "")
+
+	obj := &BucketEncryptionType{algorithm}
+	jsonBytes, jsonErr := json.Marshal(obj)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE)
+	req.SetBody(body)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketEncryption - get the encryption config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - algorithm: the bucket encryption algorithm
+//     - error: nil if success otherwise the specific error
+func GetBucketEncryption(cli bce.Client, bucket string) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+	req.SetParam("encryption", "")
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	}
+	result := &BucketEncryptionType{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return "", err
+	}
+	return result.EncryptionAlgorithm, nil
+}
+
+// DeleteBucketEncryption - delete the encryption config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func DeleteBucketEncryption(cli bce.Client, bucket string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.DELETE)
+	req.SetParam("encryption", "")
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// PutBucketStaticWebsite - set the bucket static website config
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - confBody: the static website config body stream
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketStaticWebsite(cli bce.Client, bucket string, confBody *bce.Body) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	req.SetParam("website", "")
+	if confBody != nil {
+		req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE)
+		req.SetBody(confBody)
+	}
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketStaticWebsite - get the static website config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - result: the bucket static website config
result object +// - error: nil if success otherwise the specific error +func GetBucketStaticWebsite(cli bce.Client, bucket string) ( + *GetBucketStaticWebsiteResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("website", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketStaticWebsiteResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketStaticWebsite - delete the static website config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketStaticWebsite(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("website", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketCors - set the bucket CORS config +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - confBody: the CORS config body stream +// RETURNS: +// - error: nil if success otherwise the specific error +func PutBucketCors(cli bce.Client, bucket string, confBody *bce.Body) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("cors", "") + if confBody != nil { + req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) + req.SetBody(confBody) + } + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// GetBucketCors - get the CORS config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - result: the bucket CORS config result object +// - error: nil if success otherwise the specific error +func GetBucketCors(cli bce.Client, bucket string) ( + *GetBucketCorsResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("cors", "") + + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketCorsResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +// DeleteBucketCors - delete the CORS config of the given bucket +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func DeleteBucketCors(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("cors", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +// PutBucketCopyrightProtection - set the copyright protection 
config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - resources: the resource items in the bucket to be protected
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketCopyrightProtection(cli bce.Client, bucket string, resources ...string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	req.SetParam("copyrightProtection", "")
+	if len(resources) == 0 {
+		return bce.NewBceClientError("the resource to set copyright protection is empty")
+	}
+	arg := &CopyrightProtectionType{resources}
+	jsonBytes, jsonErr := json.Marshal(arg)
+	if jsonErr != nil {
+		return jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return err
+	}
+	req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE)
+	req.SetBody(body)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketCopyrightProtection - get the copyright protection config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - result: the bucket copyright protection resources array
+//     - error: nil if success otherwise the specific error
+func GetBucketCopyrightProtection(cli bce.Client, bucket string) ([]string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+	req.SetParam("copyrightProtection", "")
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &CopyrightProtectionType{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result.Resource, nil
+}
+
+// DeleteBucketCopyrightProtection - delete the copyright protection config of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func DeleteBucketCopyrightProtection(cli bce.Client, bucket string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.DELETE)
+	req.SetParam("copyrightProtection", "")
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// PutBucketTrash - put the trash setting of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - trashReq: the trash configuration to set, including the trash dir name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutBucketTrash(cli bce.Client, bucket string, trashReq PutBucketTrashReq) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.PUT)
+	req.SetParam("trash", "")
+	reqByte, _ := json.Marshal(trashReq)
+	body, err := bce.NewBodyFromString(string(reqByte))
+	if err != nil {
+		return err
+	}
+	req.SetBody(body)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetBucketTrash - get the trash setting of the given bucket
+func GetBucketTrash(cli
bce.Client, bucket string) (*GetBucketTrashResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("trash", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketTrashResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketTrash(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("trash", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationReq PutBucketNotificationReq) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("notification", "") + reqByte, _ := json.Marshal(putBucketNotificationReq) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetBucketNotification(cli bce.Client, bucket string) (*PutBucketNotificationReq, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("notification", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &PutBucketNotificationReq{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketNotification(cli bce.Client, bucket string) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("notification", "") + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go new file mode 100644 index 0000000..d3b654c --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go @@ -0,0 +1,580 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ + +// model.go - definitions of the request arguments and results data structure model + +package api + +import ( + "io" +) + +type OwnerType struct { + Id string `json:"id"` + DisplayName string `json:"displayName"` +} + +type BucketSummaryType struct { + Name string `json:"name"` + Location string `json:"location"` + CreationDate string `json:"creationDate"` +} + +// ListBucketsResult defines the result structure of ListBuckets api. +type ListBucketsResult struct { + Owner OwnerType `json:"owner"` + Buckets []BucketSummaryType `json:"buckets"` +} + +// ListObjectsArgs defines the optional arguments for ListObjects api. +type ListObjectsArgs struct { + Delimiter string `json:"delimiter"` + Marker string `json:"marker"` + MaxKeys int `json:"maxKeys"` + Prefix string `json:"prefix"` +} + +type ObjectSummaryType struct { + Key string `json:"key"` + LastModified string `json:"lastModified"` + ETag string `json:"eTag"` + Size int `json:"size"` + StorageClass string `json:"storageClass"` + Owner OwnerType `json:"owner"` +} + +type PrefixType struct { + Prefix string `json:"prefix"` +} + +// ListObjectsResult defines the result structure of ListObjects api. +type ListObjectsResult struct { + Name string `json:"name"` + Prefix string `json:"prefix"` + Delimiter string `json:"delimiter"` + Marker string `json:"marker"` + NextMarker string `json:"nextMarker,omitempty"` + MaxKeys int `json:"maxKeys"` + IsTruncated bool `json:"isTruncated"` + Contents []ObjectSummaryType `json:"contents"` + CommonPrefixes []PrefixType `json:"commonPrefixes"` +} + +type LocationType struct { + LocationConstraint string `json:"locationConstraint"` +} + +// AclOwnerType defines the owner struct in ACL setting +type AclOwnerType struct { + Id string `json:"id"` +} + +// GranteeType defines the grantee struct in ACL setting +type GranteeType struct { + Id string `json:"id"` +} + +type AclRefererType struct { + StringLike []string `json:"stringLike"` + StringEquals []string `json:"stringEquals"` +} + +type AclCondType struct { + IpAddress []string `json:"ipAddress"` + Referer AclRefererType `json:"referer"` +} + +// GrantType defines the grant struct in ACL setting +type GrantType struct { + Grantee []GranteeType `json:"grantee"` + Permission []string `json:"permission"` + Resource []string `json:"resource,omitempty"` + NotResource []string `json:"notResource,omitempty"` + Condition AclCondType `json:"condition,omitempty"` + Effect string `json:"effect,omitempty"` +} + +// PutBucketAclArgs defines the input args structure for putting bucket acl. +type PutBucketAclArgs struct { + AccessControlList []GrantType `json:"accessControlList"` +} + +// GetBucketAclResult defines the result structure of getting bucket acl. +type GetBucketAclResult struct { + AccessControlList []GrantType `json:"accessControlList"` + Owner AclOwnerType `json:"owner"` +} + +// PutBucketLoggingArgs defines the input args structure for putting bucket logging. +type PutBucketLoggingArgs struct { + TargetBucket string `json:"targetBucket"` + TargetPrefix string `json:"targetPrefix"` +} + +// GetBucketLoggingResult defines the result structure for getting bucket logging. 
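+//
+// For illustration, the JSON decoded into this structure is shaped like the
+// following (field values are examples only):
+//
+//	{
+//	    "status": "enabled",
+//	    "targetBucket": "log-bucket",
+//	    "targetPrefix": "access-log-"
+//	}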
+type GetBucketLoggingResult struct {
+	Status       string `json:"status"`
+	TargetBucket string `json:"targetBucket,omitempty"`
+	TargetPrefix string `json:"targetPrefix,omitempty"`
+}
+
+// LifecycleConditionTimeType defines the structure of the time condition
+type LifecycleConditionTimeType struct {
+	DateGreaterThan string `json:"dateGreaterThan"`
+}
+
+// LifecycleConditionType defines the structure of condition
+type LifecycleConditionType struct {
+	Time LifecycleConditionTimeType `json:"time"`
+}
+
+// LifecycleActionType defines the structure of lifecycle action
+type LifecycleActionType struct {
+	Name         string `json:"name"`
+	StorageClass string `json:"storageClass,omitempty"`
+}
+
+// LifecycleRuleType defines the structure of a single lifecycle rule
+type LifecycleRuleType struct {
+	Id        string                 `json:"id"`
+	Status    string                 `json:"status"`
+	Resource  []string               `json:"resource"`
+	Condition LifecycleConditionType `json:"condition"`
+	Action    LifecycleActionType    `json:"action"`
+}
+
+// PutBucketLifecycleArgs defines the lifecycle argument structure for putting
+type PutBucketLifecycleArgs struct {
+	Rule []LifecycleRuleType `json:"rule"`
+}
+
+// GetBucketLifecycleResult defines the lifecycle result structure for getting
+type GetBucketLifecycleResult struct {
+	Rule []LifecycleRuleType `json:"rule"`
+}
+
+type StorageClassType struct {
+	StorageClass string `json:"storageClass"`
+}
+
+// BucketReplicationDescriptor defines the description data structure
+type BucketReplicationDescriptor struct {
+	Bucket       string `json:"bucket,omitempty"`
+	StorageClass string `json:"storageClass,omitempty"`
+}
+
+// BucketReplicationType defines the data structure for Put and Get of bucket replication
+type BucketReplicationType struct {
+	Id               string                       `json:"id"`
+	Status           string                       `json:"status"`
+	Resource         []string                     `json:"resource"`
+	ReplicateDeletes string                       `json:"replicateDeletes"`
+	Destination      *BucketReplicationDescriptor `json:"destination,omitempty"`
+	ReplicateHistory *BucketReplicationDescriptor `json:"replicateHistory,omitempty"`
+	CreateTime       int64                        `json:"createTime"`
+	DestRegion       string                       `json:"destRegion"`
+}
+
+type PutBucketReplicationArgs BucketReplicationType
+type GetBucketReplicationResult BucketReplicationType
+
+// ListBucketReplicationResult defines the output result for the replication conf list
+type ListBucketReplicationResult struct {
+	Rules []BucketReplicationType `json:"rules"`
+}
+
+// GetBucketReplicationProgressResult defines the output result for the replication progress
+type GetBucketReplicationProgressResult struct {
+	Status                    string  `json:"status"`
+	HistoryReplicationPercent float64 `json:"historyReplicationPercent"`
+	LatestReplicationTime     string  `json:"latestReplicationTime"`
+}
+
+// BucketEncryptionType defines the data structure for Put and Get of bucket encryption
+type BucketEncryptionType struct {
+	EncryptionAlgorithm string `json:"encryptionAlgorithm"`
+}
+
+// BucketStaticWebsiteType defines the data structure for Put and Get of bucket static website
+type BucketStaticWebsiteType struct {
+	Index    string `json:"index"`
+	NotFound string `json:"notFound"`
+}
+
+type PutBucketStaticWebsiteArgs BucketStaticWebsiteType
+type GetBucketStaticWebsiteResult BucketStaticWebsiteType
+
+type BucketCORSType struct {
+	AllowedOrigins       []string `json:"allowedOrigins"`
+	AllowedMethods       []string `json:"allowedMethods"`
+	AllowedHeaders       []string `json:"allowedHeaders,omitempty"`
+	AllowedExposeHeaders []string `json:"allowedExposeHeaders,omitempty"`
+	MaxAgeSeconds        int64 
`json:"maxAgeSeconds,omitempty"` +} + +// PutBucketCorsArgs defines the request argument for bucket CORS setting +type PutBucketCorsArgs struct { + CorsConfiguration []BucketCORSType `json:"corsConfiguration"` +} + +// GetBucketCorsResult defines the data structure of getting bucket CORS result +type GetBucketCorsResult struct { + CorsConfiguration []BucketCORSType `json:"corsConfiguration"` +} + +// CopyrightProtectionType defines the data structure for Put and Get copyright protection API +type CopyrightProtectionType struct { + Resource []string `json:"resource"` +} + +// ObjectAclType defines the data structure for Put and Get object acl API +type ObjectAclType struct { + AccessControlList []GrantType `json:"accessControlList"` +} + +type PutObjectAclArgs ObjectAclType +type GetObjectAclResult ObjectAclType + +// PutObjectArgs defines the optional args structure for the put object api. +type PutObjectArgs struct { + CacheControl string + ContentDisposition string + ContentMD5 string + ContentType string + ContentLength int64 + Expires string + UserMeta map[string]string + ContentSha256 string + ContentCrc32 string + StorageClass string + Process string +} + +// CopyObjectArgs defines the optional args structure for the copy object api. +type CopyObjectArgs struct { + ObjectMeta + MetadataDirective string + IfMatch string + IfNoneMatch string + IfModifiedSince string + IfUnmodifiedSince string +} + +type MultiCopyObjectArgs struct { + StorageClass string +} + +// CopyObjectResult defines the result json structure for the copy object api. +type CopyObjectResult struct { + LastModified string `json:"lastModified"` + ETag string `json:"eTag"` +} + +type ObjectMeta struct { + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLength int64 + ContentRange string + ContentType string + ContentMD5 string + ContentSha256 string + ContentCrc32 string + Expires string + LastModified string + ETag string + UserMeta map[string]string + StorageClass string + NextAppendOffset string + ObjectType string + BceRestore string + BceObjectType string +} + +// GetObjectResult defines the result data of the get object api. +type GetObjectResult struct { + ObjectMeta + ContentLanguage string + Body io.ReadCloser +} + +// GetObjectMetaResult defines the result data of the get object meta api. +type GetObjectMetaResult struct { + ObjectMeta +} + +// SelectObjectResult defines the result data of the select object api. 
+type SelectObjectResult struct {
+	Body io.ReadCloser
+}
+
+// SelectObjectArgs defines the request arguments of the select object api.
+type SelectObjectArgs struct {
+	SelectType    string               `json:"-"`
+	SelectRequest *SelectObjectRequest `json:"selectRequest"`
+}
+
+type SelectObjectRequest struct {
+	Expression          string                `json:"expression"`
+	ExpressionType      string                `json:"expressionType"` // SQL
+	InputSerialization  *SelectObjectInput    `json:"inputSerialization"`
+	OutputSerialization *SelectObjectOutput   `json:"outputSerialization"`
+	RequestProgress     *SelectObjectProgress `json:"requestProgress"`
+}
+
+type SelectObjectInput struct {
+	CompressionType string            `json:"compressionType"`
+	CsvParams       map[string]string `json:"csv"`
+	JsonParams      map[string]string `json:"json"`
+}
+
+type SelectObjectOutput struct {
+	OutputHeader bool              `json:"outputHeader"`
+	CsvParams    map[string]string `json:"csv"`
+	JsonParams   map[string]string `json:"json"`
+}
+
+type SelectObjectProgress struct {
+	Enabled bool `json:"enabled"`
+}
+
+type Prelude struct {
+	TotalLen   uint32
+	HeadersLen uint32
+}
+
+// selectObject response messages
+type CommonMessage struct {
+	Prelude
+	Headers map[string]string // e.g. message-type, content-type
+	Crc32   uint32            // crc32 of RecordsMessage
+}
+
+type RecordsMessage struct {
+	CommonMessage
+	Records []string // csv/json selected data, one or more records
+}
+
+type ContinuationMessage struct {
+	CommonMessage
+	BytesScanned  uint64
+	BytesReturned uint64
+}
+
+type EndMessage struct {
+	CommonMessage
+}
+
+// FetchObjectArgs defines the optional arguments structure for the fetch object api.
+type FetchObjectArgs struct {
+	FetchMode    string
+	StorageClass string
+}
+
+// FetchObjectResult defines the result json structure for the fetch object api.
+type FetchObjectResult struct {
+	Code      string `json:"code"`
+	Message   string `json:"message"`
+	RequestId string `json:"requestId"`
+	JobId     string `json:"jobId"`
+}
+
+// AppendObjectArgs defines the optional arguments structure for appending an object.
+type AppendObjectArgs struct {
+	Offset             int64
+	CacheControl       string
+	ContentDisposition string
+	ContentMD5         string
+	ContentType        string
+	Expires            string
+	UserMeta           map[string]string
+	ContentSha256      string
+	ContentCrc32       string
+	StorageClass       string
+}
+
+// AppendObjectResult defines the result data structure for appending an object.
+type AppendObjectResult struct {
+	ContentMD5       string
+	NextAppendOffset int64
+	ContentCrc32     string
+	ETag             string
+}
+
+// DeleteObjectArgs defines the input args structure for deleting a single object.
+type DeleteObjectArgs struct {
+	Key string `json:"key"`
+}
+
+// DeleteMultipleObjectsArgs defines the input args structure for deleting multiple objects.
+type DeleteMultipleObjectsArgs struct {
+	Objects []DeleteObjectArgs `json:"objects"`
+}
+
+// DeleteObjectResult defines the result structure for deleting a single object.
+type DeleteObjectResult struct {
+	Key     string `json:"key"`
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// DeleteMultipleObjectsResult defines the result structure for deleting multiple objects.
+type DeleteMultipleObjectsResult struct {
+	Errors []DeleteObjectResult `json:"errors"`
+}
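+
+// deleteBatchArgsSketch is an illustrative sketch, not part of the upstream
+// SDK: it shows the JSON shape the delete-multiple-objects api consumes, a
+// list of object keys wrapped in DeleteMultipleObjectsArgs.
+func deleteBatchArgsSketch(keys []string) *DeleteMultipleObjectsArgs {
+	args := &DeleteMultipleObjectsArgs{}
+	for _, key := range keys {
+		args.Objects = append(args.Objects, DeleteObjectArgs{Key: key})
+	}
+	return args
+}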
+
+// InitiateMultipartUploadArgs defines the input arguments to initiate a multipart upload.
+type InitiateMultipartUploadArgs struct {
+	CacheControl       string
+	ContentDisposition string
+	Expires            string
+	StorageClass       string
+}
+
+// InitiateMultipartUploadResult defines the result structure to initiate a multipart upload.
+type InitiateMultipartUploadResult struct {
+	Bucket   string `json:"bucket"`
+	Key      string `json:"key"`
+	UploadId string `json:"uploadId"`
+}
+
+// UploadPartArgs defines the optional arguments for uploading a part.
+type UploadPartArgs struct {
+	ContentMD5    string
+	ContentSha256 string
+	ContentCrc32  string
+}
+
+// UploadPartCopyArgs defines the optional arguments of UploadPartCopy.
+type UploadPartCopyArgs struct {
+	SourceRange       string
+	IfMatch           string
+	IfNoneMatch       string
+	IfModifiedSince   string
+	IfUnmodifiedSince string
+}
+
+type PutSymlinkArgs struct {
+	ForbidOverwrite string
+	StorageClass    string
+	UserMeta        map[string]string
+}
+
+// UploadInfoType defines an uploaded part info structure.
+type UploadInfoType struct {
+	PartNumber int    `json:"partNumber"`
+	ETag       string `json:"eTag"`
+}
+
+// CompleteMultipartUploadArgs defines the input arguments structure of CompleteMultipartUpload.
+type CompleteMultipartUploadArgs struct {
+	Parts        []UploadInfoType  `json:"parts"`
+	UserMeta     map[string]string `json:"-"`
+	Process      string            `json:"-"`
+	ContentCrc32 string            `json:"-"`
+}
+
+// CompleteMultipartUploadResult defines the result structure of CompleteMultipartUpload.
+type CompleteMultipartUploadResult struct {
+	Location     string `json:"location"`
+	Bucket       string `json:"bucket"`
+	Key          string `json:"key"`
+	ETag         string `json:"eTag"`
+	ContentCrc32 string `json:"-"`
+}
+
+// ListPartsArgs defines the optional input arguments of listing parts information.
+type ListPartsArgs struct {
+	MaxParts         int
+	PartNumberMarker string
+}
+
+type ListPartType struct {
+	PartNumber   int    `json:"partNumber"`
+	LastModified string `json:"lastModified"`
+	ETag         string `json:"eTag"`
+	Size         int    `json:"size"`
+}
+
+// ListPartsResult defines the parts info result from ListParts.
+type ListPartsResult struct {
+	Bucket               string         `json:"bucket"`
+	Key                  string         `json:"key"`
+	UploadId             string         `json:"uploadId"`
+	Initiated            string         `json:"initiated"`
+	Owner                OwnerType      `json:"owner"`
+	StorageClass         string         `json:"storageClass"`
+	PartNumberMarker     int            `json:"partNumberMarker"`
+	NextPartNumberMarker int            `json:"nextPartNumberMarker"`
+	MaxParts             int            `json:"maxParts"`
+	IsTruncated          bool           `json:"isTruncated"`
+	Parts                []ListPartType `json:"parts"`
+}
+
+// ListMultipartUploadsArgs defines the optional arguments for ListMultipartUploads.
+type ListMultipartUploadsArgs struct {
+	Delimiter  string
+	KeyMarker  string
+	MaxUploads int
+	Prefix     string
+}
+
+type ListMultipartUploadsType struct {
+	Key          string    `json:"key"`
+	UploadId     string    `json:"uploadId"`
+	Owner        OwnerType `json:"owner"`
+	Initiated    string    `json:"initiated"`
+	StorageClass string    `json:"storageClass,omitempty"`
+}
+
+// ListMultipartUploadsResult defines the multipart uploads result structure.
+type ListMultipartUploadsResult struct { + Bucket string `json:"bucket"` + CommonPrefixes []PrefixType `json:"commonPrefixes"` + Delimiter string `json:"delimiter"` + Prefix string `json:"prefix"` + IsTruncated bool `json:"isTruncated"` + KeyMarker string `json:"keyMarker"` + MaxUploads int `json:"maxUploads"` + NextKeyMarker string `json:"nextKeyMarker"` + Uploads []ListMultipartUploadsType `json:"uploads"` +} + +type ArchiveRestoreArgs struct { + RestoreTier string + RestoreDays int +} + +type GetBucketTrashResult struct { + TrashDir string `json:"trashDir"` +} + +type PutBucketTrashReq struct { + TrashDir string `json:"trashDir"` +} + +type PutBucketNotificationReq struct { + Notifications []PutBucketNotificationSt `json:"notifications"` +} + +type PutBucketNotificationSt struct { + Id string `json:"id"` + Name string `json:"name"` + AppId string `json:"appId"` + Status string `json:"status"` + Resources []string `json:"resources"` + Events []string `json:"events"` + Apps []PutBucketNotificationAppsSt `json:"apps"` +} + +type PutBucketNotificationAppsSt struct { + Id string `json:"id"` + EventUrl string `json:"eventUrl"` + XVars string `json:"xVars"` +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go new file mode 100644 index 0000000..ada259e --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go @@ -0,0 +1,421 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */
+
+// multipart.go - the multipart-related APIs definition supported by the BOS service
+
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"github.com/baidubce/bce-sdk-go/bce"
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// InitiateMultipartUpload - initiate a multipart upload to get an upload ID
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+//     - contentType: the content type of the object to be uploaded; if empty,
+//       the default (application/octet-stream) is used
+//     - args: the optional arguments
+// RETURNS:
+//     - *InitiateMultipartUploadResult: the result data structure
+//     - error: nil if ok otherwise the specific error
+func InitiateMultipartUpload(cli bce.Client, bucket, object, contentType string,
+	args *InitiateMultipartUploadArgs) (*InitiateMultipartUploadResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.POST)
+	req.SetParam("uploads", "")
+	if len(contentType) == 0 {
+		contentType = RAW_CONTENT_TYPE
+	}
+	req.SetHeader(http.CONTENT_TYPE, contentType)
+
+	// Optional arguments settings
+	if args != nil {
+		setOptionalNullHeaders(req, map[string]string{
+			http.CACHE_CONTROL:       args.CacheControl,
+			http.CONTENT_DISPOSITION: args.ContentDisposition,
+			http.EXPIRES:             args.Expires,
+		})
+
+		if validStorageClass(args.StorageClass) {
+			req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass)
+		} else {
+			if len(args.StorageClass) != 0 {
+				return nil, bce.NewBceClientError("invalid storage class value: " +
+					args.StorageClass)
+			}
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &InitiateMultipartUploadResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	defer func() { resp.Body().Close() }()
+	return result, nil
+}
+
+// UploadPart - upload the single part in the multipart upload process
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the multipart upload id
+//     - partNumber: the current part number
+//     - content: the uploaded part content
+//     - args: the optional arguments
+// RETURNS:
+//     - string: the etag of the uploaded part
+//     - error: nil if ok otherwise the specific error
+func UploadPart(cli bce.Client, bucket, object, uploadId string, partNumber int,
+	content *bce.Body, args *UploadPartArgs) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.PUT)
+	req.SetParam("uploadId", uploadId)
+	req.SetParam("partNumber", fmt.Sprintf("%d", partNumber))
+	if content == nil {
+		return "", bce.NewBceClientError("upload part content should not be empty")
+	}
+	if content.Size() >= THRESHOLD_100_CONTINUE {
+		req.SetHeader("Expect", "100-continue")
+	}
+	req.SetBody(content)
+
+	// Optional arguments settings
+	if args != nil {
+		setOptionalNullHeaders(req, map[string]string{
+			http.CONTENT_MD5:        args.ContentMD5,
+			http.BCE_CONTENT_SHA256: args.ContentSha256,
+			http.BCE_CONTENT_CRC32:  args.ContentCrc32,
+		})
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	
} + defer func() { resp.Body().Close() }() + return strings.Trim(resp.Header(http.ETAG), "\""), nil +} + +// UploadPartFromBytes - upload the single part in the multipart upload process +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// - args: the optional arguments +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func UploadPartFromBytes(cli bce.Client, bucket, object, uploadId string, partNumber int, + content []byte, args *UploadPartArgs) (string, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("uploadId", uploadId) + req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + if content == nil { + return "", bce.NewBceClientError("upload part content should not be empty") + } + size := len(content) + if size >= THRESHOLD_100_CONTINUE { + req.SetHeader("Expect", "100-continue") + } + // set md5 and content-length + req.SetLength(int64(size)) + if size > 0 { + // calc md5 + if args == nil || args.ContentMD5 == "" { + buf := bytes.NewBuffer(content) + contentMD5, err := util.CalculateContentMD5(buf, int64(size)) + if err != nil { + return "", err + } + req.SetHeader(http.CONTENT_MD5, contentMD5) + } + req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", size)) + } + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.CONTENT_MD5: args.ContentMD5, + http.BCE_CONTENT_SHA256: args.ContentSha256, + http.BCE_CONTENT_CRC32: args.ContentCrc32, + }) + } + // Send request and get the result + resp := &bce.BceResponse{} + if err := cli.SendRequestFromBytes(req, resp, content); err != nil { + return "", err + } + if resp.IsFail() { + return "", resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return strings.Trim(resp.Header(http.ETAG), "\""), nil +} + +// UploadPartCopy - copy the multipart data +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the destination bucket name +// - object: the destination object name +// - source: the copy source uri +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - args: the optional arguments +// RETURNS: +// - *CopyObjectResult: the lastModified and eTag of the part +// - error: nil if ok otherwise the specific error +func UploadPartCopy(cli bce.Client, bucket, object, source, uploadId string, partNumber int, + args *UploadPartCopyArgs) (*CopyObjectResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("uploadId", uploadId) + req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + if len(source) == 0 { + return nil, bce.NewBceClientError("upload part copy source should not be empty") + } + req.SetHeader(http.BCE_COPY_SOURCE, util.UriEncode(source, false)) + + // Optional arguments settings + if args != nil { + setOptionalNullHeaders(req, map[string]string{ + http.BCE_COPY_SOURCE_RANGE: args.SourceRange, + http.BCE_COPY_SOURCE_IF_MATCH: args.IfMatch, + http.BCE_COPY_SOURCE_IF_NONE_MATCH: args.IfNoneMatch, + http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE: args.IfModifiedSince, + http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE: args.IfUnmodifiedSince, + }) + } + + // Send request and get the result + resp := 
&bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &CopyObjectResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// CompleteMultipartUpload - finish a multipart upload operation
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+//     - body: the parts info JSON stream
+//     - args: the optional arguments (user meta data, process, content crc32)
+// RETURNS:
+//     - *CompleteMultipartUploadResult: the result data
+//     - error: nil if ok otherwise the specific error
+func CompleteMultipartUpload(cli bce.Client, bucket, object, uploadId string,
+	body *bce.Body, args *CompleteMultipartUploadArgs) (*CompleteMultipartUploadResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.POST)
+	req.SetParam("uploadId", uploadId)
+	if body == nil {
+		return nil, bce.NewBceClientError("upload body info should not be empty")
+	}
+	if body.Size() >= THRESHOLD_100_CONTINUE {
+		req.SetHeader("Expect", "100-continue")
+	}
+	req.SetBody(body)
+
+	// Optional arguments settings (args may be nil)
+	if args != nil {
+		if args.UserMeta != nil {
+			if err := setUserMetadata(req, args.UserMeta); err != nil {
+				return nil, err
+			}
+		}
+		if len(args.Process) != 0 {
+			req.SetHeader(http.BCE_PROCESS, args.Process)
+		}
+		if len(args.ContentCrc32) != 0 {
+			req.SetHeader(http.BCE_CONTENT_CRC32, args.ContentCrc32)
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &CompleteMultipartUploadResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	headers := resp.Headers()
+	if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok {
+		result.ContentCrc32 = val
+	}
+	return result, nil
+}
+
+// AbortMultipartUpload - abort a multipart upload operation
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func AbortMultipartUpload(cli bce.Client, bucket, object, uploadId string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.DELETE)
+	req.SetParam("uploadId", uploadId)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
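+
+// uploadPartsSketch is an illustrative sketch, not part of the upstream SDK:
+// it chains the calls above (initiate once, upload each pre-split body with a
+// 1-based part number, abort on failure) and returns the part infos from
+// which a CompleteMultipartUploadArgs body is built. The empty contentType
+// falls back to application/octet-stream.
+func uploadPartsSketch(cli bce.Client, bucket, object string,
+	parts []*bce.Body) ([]UploadInfoType, string, error) {
+	init, err := InitiateMultipartUpload(cli, bucket, object, "", nil)
+	if err != nil {
+		return nil, "", err
+	}
+	infos := make([]UploadInfoType, 0, len(parts))
+	for i, part := range parts {
+		etag, err := UploadPart(cli, bucket, object, init.UploadId, i+1, part, nil)
+		if err != nil {
+			// Abort so the service can reclaim the parts uploaded so far.
+			_ = AbortMultipartUpload(cli, bucket, object, init.UploadId)
+			return nil, "", err
+		}
+		infos = append(infos, UploadInfoType{PartNumber: i + 1, ETag: etag})
+	}
+	return infos, init.UploadId, nil
+}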
+
+// ListParts - list the successfully uploaded parts info by upload id
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - uploadId: the multipart upload id
+//     - args: the optional arguments
+//       partNumberMarker: return parts after this marker
+//       maxParts: the max number of returned parts; the default and maximum is 1000
+// RETURNS:
+//     - *ListPartsResult: the uploaded parts info result
+//     - error: nil if ok otherwise the specific error
+func ListParts(cli bce.Client, bucket, object, uploadId string,
+	args *ListPartsArgs) (*ListPartsResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.GET)
+	req.SetParam("uploadId", uploadId)
+
+	// Optional arguments settings
+	if args != nil {
+		if len(args.PartNumberMarker) > 0 {
+			req.SetParam("partNumberMarker", args.PartNumberMarker)
+		}
+		if args.MaxParts > 0 {
+			req.SetParam("maxParts", fmt.Sprintf("%d", args.MaxParts))
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &ListPartsResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ListMultipartUploads - list the in-progress multipart uploads of the given bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the destination bucket name
+//     - args: the optional arguments
+// RETURNS:
+//     - *ListMultipartUploadsResult: the in-progress multipart uploads info result
+//     - error: nil if ok otherwise the specific error
+func ListMultipartUploads(cli bce.Client, bucket string,
+	args *ListMultipartUploadsArgs) (*ListMultipartUploadsResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.GET)
+	req.SetParam("uploads", "")
+
+	// Optional arguments settings
+	if args != nil {
+		if len(args.Delimiter) > 0 {
+			req.SetParam("delimiter", args.Delimiter)
+		}
+		if len(args.KeyMarker) > 0 {
+			req.SetParam("keyMarker", args.KeyMarker)
+		}
+		if args.MaxUploads > 0 {
+			req.SetParam("maxUploads", fmt.Sprintf("%d", args.MaxUploads))
+		}
+		if len(args.Prefix) > 0 {
+			req.SetParam("prefix", args.Prefix)
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &ListMultipartUploadsResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
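+
+// listAllUploadsSketch is an illustrative sketch, not part of the upstream
+// SDK: it pages through ListMultipartUploads by feeding NextKeyMarker back in
+// as the KeyMarker until IsTruncated reports the last page.
+func listAllUploadsSketch(cli bce.Client, bucket string) ([]ListMultipartUploadsType, error) {
+	uploads := []ListMultipartUploadsType{}
+	args := &ListMultipartUploadsArgs{}
+	for {
+		res, err := ListMultipartUploads(cli, bucket, args)
+		if err != nil {
+			return nil, err
+		}
+		uploads = append(uploads, res.Uploads...)
+		if !res.IsTruncated {
+			return uploads, nil
+		}
+		// Resume listing after the last key of the current page.
+		args.KeyMarker = res.NextKeyMarker
+	}
+}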
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go
new file mode 100644
index 0000000..4301757
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go
@@ -0,0 +1,949 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// object.go - the object APIs definition supported by the BOS service
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+
+	"github.com/baidubce/bce-sdk-go/auth"
+	"github.com/baidubce/bce-sdk-go/bce"
+	"github.com/baidubce/bce-sdk-go/http"
+	"github.com/baidubce/bce-sdk-go/util"
+)
+
+// PutObject - put the object from the string or the stream
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - object: the name of the object
+//     - body: the input content of the object
+//     - args: the optional arguments of this api
+// RETURNS:
+//     - string: the etag of the object
+//     - error: nil if ok otherwise the specific error
+func PutObject(cli bce.Client, bucket, object string, body *bce.Body,
+	args *PutObjectArgs) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.PUT)
+	if body == nil {
+		return "", bce.NewBceClientError("PutObject body should not be empty")
+	}
+	if body.Size() >= THRESHOLD_100_CONTINUE {
+		req.SetHeader("Expect", "100-continue")
+	}
+	req.SetBody(body)
+
+	// Optional arguments settings
+	if args != nil {
+		setOptionalNullHeaders(req, map[string]string{
+			http.CACHE_CONTROL:       args.CacheControl,
+			http.CONTENT_DISPOSITION: args.ContentDisposition,
+			http.CONTENT_TYPE:        args.ContentType,
+			http.EXPIRES:             args.Expires,
+			http.BCE_CONTENT_SHA256:  args.ContentSha256,
+			http.BCE_CONTENT_CRC32:   args.ContentCrc32,
+		})
+		if args.ContentLength > 0 {
+			// A user-specified Content-Length may be smaller than the body size, so the body
+			// is re-wrapped to that length; net/http.Client does not support a Content-Length
+			// bigger than the body size.
+			if args.ContentLength > body.Size() {
+				return "", bce.NewBceClientError(fmt.Sprintf("ContentLength %d is bigger than body size %d", args.ContentLength, body.Size()))
+			}
+			body, err := bce.NewBodyFromSizedReader(body.Stream(), args.ContentLength)
+			if err != nil {
+				return "", bce.NewBceClientError(err.Error())
+			}
+			req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", args.ContentLength))
+			req.SetBody(body) // re-assign the truncated body
+		}
+
+		// Reset the contentMD5 if set by user
+		if len(args.ContentMD5) != 0 {
+			req.SetHeader(http.CONTENT_MD5, args.ContentMD5)
+		}
+
+		if validStorageClass(args.StorageClass) {
+			req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass)
+		} else {
+			if len(args.StorageClass) != 0 {
+				return "", bce.NewBceClientError("invalid storage class value: " +
+					args.StorageClass)
+			}
+		}
+
+		if err := setUserMetadata(req, args.UserMeta); err != nil {
+			return "", err
+		}
+
+		if len(args.Process) != 0 {
+			req.SetHeader(http.BCE_PROCESS, args.Process)
+		}
+	}
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return strings.Trim(resp.Header(http.ETAG), "\""), nil
+}
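+
+// putStringSketch is an illustrative sketch, not part of the upstream SDK: it
+// wraps a plain string in a bce.Body and stores it through PutObject with an
+// explicit content type; unset PutObjectArgs fields are simply skipped by the
+// optional-header logic above.
+func putStringSketch(cli bce.Client, bucket, object, data string) (string, error) {
+	body, err := bce.NewBodyFromString(data)
+	if err != nil {
+		return "", err
+	}
+	return PutObject(cli, bucket, object, body, &PutObjectArgs{ContentType: "text/plain"})
+}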
+
+// CopyObject - copy one object to a new object with a new bucket and/or name. It can also set
+// the metadata of the object with the same source and target.
+//
+// PARAMS:
+//     - cli: the client object which can perform sending request
+//     - bucket: the bucket name of the target object
+//     - object: the name of the target object
+//     - source: the source object uri
+//     - args: the optional input args for copying the object
+// RETURNS:
+//     - *CopyObjectResult: the result object which contains etag and lastmodified
+//     - error: nil if ok otherwise the specific error
+func CopyObject(cli bce.Client, bucket, object, source string,
+	args *CopyObjectArgs) (*CopyObjectResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.PUT)
+	if len(source) == 0 {
+		return nil, bce.NewBceClientError("copy source should not be null")
+	}
+	req.SetHeader(http.BCE_COPY_SOURCE, util.UriEncode(source, false))
+
+	// Optional arguments settings
+	if args != nil {
+		setOptionalNullHeaders(req, map[string]string{
+			http.CACHE_CONTROL:                       args.CacheControl,
+			http.CONTENT_DISPOSITION:                 args.ContentDisposition,
+			http.CONTENT_ENCODING:                    args.ContentEncoding,
+			http.CONTENT_RANGE:                       args.ContentRange,
+			http.CONTENT_TYPE:                        args.ContentType,
+			http.EXPIRES:                             args.Expires,
+			http.LAST_MODIFIED:                       args.LastModified,
+			http.ETAG:                                args.ETag,
+			http.CONTENT_MD5:                         args.ContentMD5,
+			http.BCE_CONTENT_SHA256:                  args.ContentSha256,
+			http.BCE_OBJECT_TYPE:                     args.ObjectType,
+			http.BCE_NEXT_APPEND_OFFSET:              args.NextAppendOffset,
+			http.BCE_COPY_SOURCE_IF_MATCH:            args.IfMatch,
+			http.BCE_COPY_SOURCE_IF_NONE_MATCH:       args.IfNoneMatch,
+			http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE:   args.IfModifiedSince,
+			http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE: args.IfUnmodifiedSince,
+		})
+		if args.ContentLength != 0 {
+			req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", args.ContentLength))
+		}
+		if validMetadataDirective(args.MetadataDirective) {
+			req.SetHeader(http.BCE_COPY_METADATA_DIRECTIVE, args.MetadataDirective)
+		} else {
+			if len(args.MetadataDirective) != 0 {
+				return nil, bce.NewBceClientError(
+					"invalid metadata directive value: " + args.MetadataDirective)
+			}
+		}
+		if validStorageClass(args.StorageClass) {
+			req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass)
+		} else {
+			if len(args.StorageClass) != 0 {
+				return nil, bce.NewBceClientError("invalid storage class value: " +
+					args.StorageClass)
+			}
+		}
+		if err := setUserMetadata(req, args.UserMeta); err != nil {
+			return nil, err
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	jsonBody := &CopyObjectResult{}
+	if err := resp.ParseJsonBody(jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+// GetObject - get the object content with range and response-headers-specified support
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - object: the name of the object
+//     - responseHeaders: the optional response headers to get the given object
+//     - ranges: the optional range start and end to get the given object
+// RETURNS:
+//     - *GetObjectResult: the output content result of the object
+//     - error: nil if ok otherwise the specific error
+func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string]string,
+	ranges ...int64) (*GetObjectResult, error) {
+	if object == "" {
+		return nil, fmt.Errorf("GetObject does not accept an empty object name")
+	}
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, 
object)) + req.SetMethod(http.GET) + + // Optional arguments settings + if responseHeaders != nil { + for k, v := range responseHeaders { + if _, ok := GET_OBJECT_ALLOWED_RESPONSE_HEADERS[k]; ok { + req.SetParam("response"+k, v) + } + } + } + if len(ranges) != 0 { + rangeStr := "bytes=" + if len(ranges) == 1 { + rangeStr += fmt.Sprintf("%d", ranges[0]) + "-" + } else { + rangeStr += fmt.Sprintf("%d", ranges[0]) + "-" + fmt.Sprintf("%d", ranges[1]) + } + req.SetHeader("Range", rangeStr) + } + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + headers := resp.Headers() + result := &GetObjectResult{} + if val, ok := headers[http.CACHE_CONTROL]; ok { + result.CacheControl = val + } + if val, ok := headers[http.CONTENT_DISPOSITION]; ok { + result.ContentDisposition = val + } + if val, ok := headers[http.CONTENT_LENGTH]; ok { + if length, err := strconv.ParseInt(val, 10, 64); err == nil { + result.ContentLength = length + } + } + if val, ok := headers[http.CONTENT_RANGE]; ok { + result.ContentRange = val + } + if val, ok := headers[http.CONTENT_TYPE]; ok { + result.ContentType = val + } + if val, ok := headers[http.CONTENT_MD5]; ok { + result.ContentMD5 = val + } + if val, ok := headers[http.EXPIRES]; ok { + result.Expires = val + } + if val, ok := headers[http.LAST_MODIFIED]; ok { + result.LastModified = val + } + if val, ok := headers[http.ETAG]; ok { + result.ETag = strings.Trim(val, "\"") + } + if val, ok := headers[http.CONTENT_LANGUAGE]; ok { + result.ContentLanguage = val + } + if val, ok := headers[http.CONTENT_ENCODING]; ok { + result.ContentEncoding = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_SHA256)]; ok { + result.ContentSha256 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok { + result.ContentCrc32 = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_STORAGE_CLASS)]; ok { + result.StorageClass = val + } + bcePrefix := toHttpHeaderKey(http.BCE_USER_METADATA_PREFIX) + for k, v := range headers { + if strings.Index(k, bcePrefix) == 0 { + if result.UserMeta == nil { + result.UserMeta = make(map[string]string) + } + result.UserMeta[k[len(bcePrefix):]] = v + } + } + if val, ok := headers[toHttpHeaderKey(http.BCE_OBJECT_TYPE)]; ok { + result.ObjectType = val + } + if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok { + result.NextAppendOffset = val + } + result.Body = resp.Body() + return result, nil +} + +// GetObjectMeta - get the meta data of the given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// RETURNS: +// - *GetObjectMetaResult: the result of this api +// - error: nil if ok otherwise the specific error +func GetObjectMeta(cli bce.Client, bucket, object string) (*GetObjectMetaResult, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.HEAD) + + // Send request and get the result + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + headers := resp.Headers() + result := &GetObjectMetaResult{} + if val, ok := headers[http.CACHE_CONTROL]; ok { + result.CacheControl = val + } + if val, ok := headers[http.CONTENT_DISPOSITION]; ok { + result.ContentDisposition = val + } + if val, ok 
:= headers[http.CONTENT_LENGTH]; ok {
+		if length, err := strconv.ParseInt(val, 10, 64); err == nil {
+			result.ContentLength = length
+		}
+	}
+	if val, ok := headers[http.CONTENT_RANGE]; ok {
+		result.ContentRange = val
+	}
+	if val, ok := headers[http.CONTENT_TYPE]; ok {
+		result.ContentType = val
+	}
+	if val, ok := headers[http.CONTENT_MD5]; ok {
+		result.ContentMD5 = val
+	}
+	if val, ok := headers[http.EXPIRES]; ok {
+		result.Expires = val
+	}
+	if val, ok := headers[http.LAST_MODIFIED]; ok {
+		result.LastModified = val
+	}
+	if val, ok := headers[http.ETAG]; ok {
+		result.ETag = strings.Trim(val, "\"")
+	}
+	if val, ok := headers[http.CONTENT_ENCODING]; ok {
+		result.ContentEncoding = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_SHA256)]; ok {
+		result.ContentSha256 = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok {
+		result.ContentCrc32 = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_STORAGE_CLASS)]; ok {
+		result.StorageClass = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_RESTORE)]; ok {
+		result.BceRestore = val
+	}
+	if val, ok := headers[http.BCE_OBJECT_TYPE]; ok {
+		result.BceObjectType = val
+	}
+	bcePrefix := toHttpHeaderKey(http.BCE_USER_METADATA_PREFIX)
+	for k, v := range headers {
+		if strings.Index(k, bcePrefix) == 0 {
+			if result.UserMeta == nil {
+				result.UserMeta = make(map[string]string)
+			}
+			result.UserMeta[k[len(bcePrefix):]] = v
+		}
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_OBJECT_TYPE)]; ok {
+		result.ObjectType = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok {
+		result.NextAppendOffset = val
+	}
+	defer func() { resp.Body().Close() }()
+	return result, nil
+}
+
+// SelectObject - select the object content
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - object: the name of the object
+//     - args: the optional arguments to perform the select operation
+// RETURNS:
+//     - *SelectObjectResult: the output select content result of the object
+//     - error: nil if ok otherwise the specific error
+func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs) (*SelectObjectResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.POST)
+	req.SetParam("select", "")
+	req.SetParam("type", args.SelectType)
+
+	jsonBytes, jsonErr := json.Marshal(args)
+	if jsonErr != nil {
+		return nil, jsonErr
+	}
+	body, err := bce.NewBodyFromBytes(jsonBytes)
+	if err != nil {
+		return nil, err
+	}
+	req.SetBody(body)
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &SelectObjectResult{}
+	result.Body = resp.Body()
+	return result, nil
+}
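+
+// selectCsvSketch is an illustrative sketch, not part of the upstream SDK: it
+// assembles a CSV SelectObjectArgs around a SQL expression and returns the
+// framed message stream (see CommonMessage in model.go). Whether the service
+// expects the expression base64-encoded is not asserted here; consult the BOS
+// SelectObject documentation. The caller must close the returned Body.
+func selectCsvSketch(cli bce.Client, bucket, object, expression string) (*SelectObjectResult, error) {
+	args := &SelectObjectArgs{
+		SelectType: "csv",
+		SelectRequest: &SelectObjectRequest{
+			Expression:          expression,
+			ExpressionType:      "SQL",
+			InputSerialization:  &SelectObjectInput{CompressionType: "NONE"},
+			OutputSerialization: &SelectObjectOutput{},
+			RequestProgress:     &SelectObjectProgress{Enabled: false},
+		},
+	}
+	return SelectObject(cli, bucket, object, args)
+}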
+
+// FetchObject - fetch the object by the given url and store it to a bucket
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name to store the object
+//     - object: the name of the object to be stored
+//     - source: the source url to fetch
+//     - args: the optional arguments to perform the fetch operation
+// RETURNS:
+//     - *FetchObjectResult: the result of this api
+//     - error: nil if ok otherwise the specific error
+func FetchObject(cli bce.Client, bucket, object, source string,
+	args *FetchObjectArgs) (*FetchObjectResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.POST)
+	req.SetParam("fetch", "")
+	if len(source) == 0 {
+		return nil, bce.NewBceClientError("invalid fetch source value: " + source)
+	}
+	req.SetHeader(http.BCE_PREFIX+"fetch-source", source)
+
+	// Optional arguments settings
+	if args != nil {
+		if validFetchMode(args.FetchMode) {
+			req.SetHeader(http.BCE_PREFIX+"fetch-mode", args.FetchMode)
+		} else {
+			if len(args.FetchMode) != 0 {
+				return nil, bce.NewBceClientError("invalid fetch mode value: " + args.FetchMode)
+			}
+		}
+		if validStorageClass(args.StorageClass) {
+			req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass)
+		} else {
+			if len(args.StorageClass) != 0 {
+				return nil, bce.NewBceClientError("invalid storage class value: " +
+					args.StorageClass)
+			}
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	jsonBody := &FetchObjectResult{}
+	if err := resp.ParseJsonBody(jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+// AppendObject - append the given content to a new or existing appendable object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - object: the name of the object
+//     - content: the content to be appended
+//     - args: the optional arguments to perform the append operation
+// RETURNS:
+//     - *AppendObjectResult: the result status for this api
+//     - error: nil if ok otherwise the specific error
+func AppendObject(cli bce.Client, bucket, object string, content *bce.Body,
+	args *AppendObjectArgs) (*AppendObjectResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.POST)
+	req.SetParam("append", "")
+	if content == nil {
+		return nil, bce.NewBceClientError("AppendObject body should not be empty")
+	}
+	if content.Size() >= THRESHOLD_100_CONTINUE {
+		req.SetHeader("Expect", "100-continue")
+	}
+	req.SetBody(content)
+
+	// Optional arguments settings
+	if args != nil {
+		if args.Offset < 0 {
+			return nil, bce.NewBceClientError(
+				fmt.Sprintf("invalid append offset value: %d", args.Offset))
+		}
+		if args.Offset > 0 {
+			req.SetParam("offset", fmt.Sprintf("%d", args.Offset))
+		}
+		setOptionalNullHeaders(req, map[string]string{
+			http.CACHE_CONTROL:       args.CacheControl,
+			http.CONTENT_DISPOSITION: args.ContentDisposition,
+			http.CONTENT_MD5:         args.ContentMD5,
+			http.CONTENT_TYPE:        args.ContentType,
+			http.EXPIRES:             args.Expires,
+			http.BCE_CONTENT_SHA256:  args.ContentSha256,
+			http.BCE_CONTENT_CRC32:   args.ContentCrc32,
+		})
+
+		if validStorageClass(args.StorageClass) {
+			req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass)
+		} else {
+			if len(args.StorageClass) != 0 {
+				return nil, bce.NewBceClientError("invalid storage class value: " +
+					args.StorageClass)
+			}
+		}
+		if err := setUserMetadata(req, args.UserMeta); err != nil {
+			return nil, err
+		}
+	}
+
+	// Send request and get the result
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	headers := resp.Headers()
+	result := &AppendObjectResult{}
+	if val, ok := headers[http.CONTENT_MD5]; ok {
+		result.ContentMD5 = val
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_NEXT_APPEND_OFFSET)]; ok {
+		nextOffset, offsetErr := strconv.ParseInt(val, 10, 64)
+		if offsetErr != nil {
+			nextOffset = content.Size()
+		}
+		result.NextAppendOffset = nextOffset
+	} else {
+		result.NextAppendOffset = content.Size()
+	}
+	if val, ok := headers[toHttpHeaderKey(http.BCE_CONTENT_CRC32)]; ok {
+		result.ContentCrc32 = val
+	}
+	if val, ok := headers[http.ETAG]; ok {
+		result.ETag = strings.Trim(val, "\"")
+	}
+	defer func() { resp.Body().Close() }()
+	return result, nil
+}
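+
+// appendTwiceSketch is an illustrative sketch, not part of the upstream SDK:
+// it appends two chunks to an appendable object, feeding the NextAppendOffset
+// returned by the first call into the second call as the required write
+// position.
+func appendTwiceSketch(cli bce.Client, bucket, object string, first, second *bce.Body) error {
+	res, err := AppendObject(cli, bucket, object, first, nil)
+	if err != nil {
+		return err
+	}
+	// The second append must start exactly where the first one ended.
+	_, err = AppendObject(cli, bucket, object, second,
+		&AppendObjectArgs{Offset: res.NextAppendOffset})
+	return err
+}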
+
+// DeleteObject - delete the given object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object to be deleted
+//     - object: the name of the object
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func DeleteObject(cli bce.Client, bucket, object string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.DELETE)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// DeleteMultipleObjects - delete the given objects within a single http request
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the objects to be deleted
+//     - objectListStream: the list of objects to be deleted, in JSON format
+// RETURNS:
+//     - *DeleteMultipleObjectsResult: the objects that failed to be deleted
+//     - error: nil if ok otherwise the specific error
+func DeleteMultipleObjects(cli bce.Client, bucket string,
+	objectListStream *bce.Body) (*DeleteMultipleObjectsResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getBucketUri(bucket))
+	req.SetMethod(http.POST)
+	req.SetParam("delete", "")
+	req.SetHeader(http.CONTENT_TYPE, "application/json; charset=utf-8")
+	if objectListStream == nil {
+		return nil, bce.NewBceClientError("DeleteMultipleObjects body should not be empty")
+	}
+	if objectListStream.Size() >= THRESHOLD_100_CONTINUE {
+		req.SetHeader("Expect", "100-continue")
+	}
+	req.SetBody(objectListStream)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	jsonBody := &DeleteMultipleObjectsResult{}
+	if err := resp.ParseJsonBody(jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+// GeneratePresignedUrl - generate an authorization url with expire time and optional arguments
+//
+// PARAMS:
+//     - conf: the client configuration
+//     - signer: the client signer object to generate the authorization string
+//     - bucket: the target bucket name
+//     - object: the target object name
+//     - expire: expire time in seconds
+//     - method: optional sign method, default is GET
+//     - headers: optional sign headers, default just set the Host
+//     - params: optional sign params, default is empty
+// RETURNS:
+//     - string: the presigned url with authorization string
+func GeneratePresignedUrl(conf *bce.BceClientConfiguration, signer auth.Signer, bucket,
+	object string, expire int, method string, headers, params map[string]string) string {
+	return GeneratePresignedUrlInternal(conf, signer, bucket, object, expire, method, headers, params, false)
+}
+
+func GeneratePresignedUrlPathStyle(conf *bce.BceClientConfiguration, signer auth.Signer, bucket,
+	object string, expire int, method string, headers, params map[string]string) string {
+	return GeneratePresignedUrlInternal(conf, signer, bucket, object, expire, method, headers, params, true)
+}
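+
+// presignGetSketch is an illustrative sketch, not part of the upstream SDK:
+// it uses the wrapper above to produce a shareable GET URL that stays valid
+// for one hour; headers and params are left nil so only the Host is signed.
+func presignGetSketch(conf *bce.BceClientConfiguration, signer auth.Signer,
+	bucket, object string) string {
+	return GeneratePresignedUrl(conf, signer, bucket, object, 3600, http.GET, nil, nil)
+}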
+
+func GeneratePresignedUrlInternal(conf *bce.BceClientConfiguration, signer auth.Signer, bucket,
+	object string, expire int, method string, headers, params map[string]string, path_style bool) string {
+	req := &bce.BceRequest{}
+	// Set basic arguments
+	if len(method) == 0 {
+		method = http.GET
+	}
+	req.SetMethod(method)
+	req.SetEndpoint(conf.Endpoint)
+	if req.Protocol() == "" {
+		req.SetProtocol(bce.DEFAULT_PROTOCOL)
+	}
+	domain := req.Host()
+	if pos := strings.Index(domain, ":"); pos != -1 {
+		domain = domain[:pos]
+	}
+	if path_style {
+		req.SetUri(getObjectUri(bucket, object))
+		if conf.CnameEnabled || isCnameLikeHost(conf.Endpoint) {
+			req.SetUri(getCnameUri(req.Uri()))
+		}
+	} else {
+		if len(bucket) != 0 && net.ParseIP(domain) == nil { // the client did not use an IP as the endpoint
+			req.SetUri(bce.URI_PREFIX + object)
+			if !conf.CnameEnabled && !isCnameLikeHost(conf.Endpoint) {
+				req.SetHost(bucket + "." + req.Host())
+			}
+		} else {
+			req.SetUri(getObjectUri(bucket, object))
+			if conf.CnameEnabled || isCnameLikeHost(conf.Endpoint) {
+				req.SetUri(getCnameUri(req.Uri()))
+			}
+		}
+	}
+	// Set headers and params if given.
+	req.SetHeader(http.HOST, req.Host())
+	if headers != nil {
+		for k, v := range headers {
+			req.SetHeader(k, v)
+		}
+	}
+	if params != nil {
+		for k, v := range params {
+			req.SetParam(k, v)
+		}
+	}
+	// Copy one SignOptions object to rewrite it.
+	option := *conf.SignOption
+	if expire != 0 {
+		option.ExpireSeconds = expire
+	}
+	// Generate the authorization string and return the signed url.
+	signer.Sign(&req.Request, conf.Credentials, &option)
+	req.SetParam("authorization", req.Header(http.AUTHORIZATION))
+	return fmt.Sprintf("%s://%s%s?%s", req.Protocol(), req.Host(),
+		util.UriEncode(req.Uri(), false), req.QueryString())
+}
+
+// PutObjectAcl - set the ACL of the given object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+//     - cannedAcl: one of private, public-read and public-read-write
+//     - grantRead: user id list
+//     - grantFullControl: user id list
+//     - aclBody: the acl file body
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func PutObjectAcl(cli bce.Client, bucket, object, cannedAcl string,
+	grantRead, grantFullControl []string, aclBody *bce.Body) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.PUT)
+	req.SetParam("acl", "")
+
+	// Joiner to generate the user id list string for the grant acl headers
+	joiner := func(ids []string) string {
+		for i := range ids {
+			ids[i] = "id=\"" + ids[i] + "\""
+		}
+		return strings.Join(ids, ",")
+	}
+
+	// Choose an ACL setting method
+	methods := 0
+	if len(cannedAcl) != 0 {
+		methods++
+		if validCannedAcl(cannedAcl) {
+			req.SetHeader(http.BCE_ACL, cannedAcl)
+		}
+	}
+	if len(grantRead) != 0 {
+		methods++
+		req.SetHeader(http.BCE_GRANT_READ, joiner(grantRead))
+	}
+	if len(grantFullControl) != 0 {
+		methods++
+		req.SetHeader(http.BCE_GRANT_FULL_CONTROL, joiner(grantFullControl))
+	}
+	if aclBody != nil {
+		methods++
+		req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE)
+		req.SetBody(aclBody)
+	}
+	if methods != 1 {
+		return bce.NewBceClientError("BOS supports only one ACL setting method at a time")
+	}
+
+	// Do sending request
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
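+
+// setPublicReadSketch is an illustrative sketch, not part of the upstream
+// SDK: it applies the public-read canned ACL, which is exactly one of the
+// four mutually exclusive ACL inputs PutObjectAcl accepts per call.
+func setPublicReadSketch(cli bce.Client, bucket, object string) error {
+	return PutObjectAcl(cli, bucket, object, CANNED_ACL_PUBLIC_READ, nil, nil, nil)
+}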
+
+// GetObjectAcl - get the ACL of the given object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+// RETURNS:
+//     - result: the object acl result object
+//     - error: nil if success otherwise the specific error
+func GetObjectAcl(cli bce.Client, bucket, object string) (*GetObjectAclResult, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.GET)
+	req.SetParam("acl", "")
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return nil, err
+	}
+	if resp.IsFail() {
+		return nil, resp.ServiceError()
+	}
+	result := &GetObjectAclResult{}
+	if err := resp.ParseJsonBody(result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// DeleteObjectAcl - delete the ACL of the given object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func DeleteObjectAcl(cli bce.Client, bucket, object string) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetMethod(http.DELETE)
+	req.SetParam("acl", "")
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// RestoreObject - restore the archive object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name
+//     - object: the object name
+//     - args: the restore args
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func RestoreObject(cli bce.Client, bucket string, object string, args ArchiveRestoreArgs) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, object))
+	req.SetParam("restore", "")
+	req.SetMethod(http.POST)
+	req.SetHeader(http.BCE_RESTORE_DAYS, strconv.Itoa(args.RestoreDays))
+	req.SetHeader(http.BCE_RESTORE_TIER, args.RestoreTier)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
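+
+// restoreArchiveSketch is an illustrative sketch, not part of the upstream
+// SDK: it requests a standard-tier restore of an ARCHIVE object for one day;
+// the tier constants are defined in util.go.
+func restoreArchiveSketch(cli bce.Client, bucket, object string) error {
+	return RestoreObject(cli, bucket, object, ArchiveRestoreArgs{
+		RestoreTier: RESTORE_TIER_STANDARD,
+		RestoreDays: 1,
+	})
+}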
+
+// PutObjectSymlink - create a symlink that points to the given object
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - object: the name of the target object
+//     - symlinkKey: the name of the symlink
+//     - symlinkArgs: the optional arguments of this api
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey string, symlinkArgs *PutSymlinkArgs) error {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, symlinkKey))
+	req.SetParam("symlink", "")
+	req.SetMethod(http.PUT)
+	if symlinkArgs != nil {
+		if len(symlinkArgs.ForbidOverwrite) != 0 {
+			if !validForbidOverwrite(symlinkArgs.ForbidOverwrite) {
+				return bce.NewBceClientError("invalid forbid overwrite value: " + symlinkArgs.ForbidOverwrite)
+			}
+			req.SetHeader(http.BCE_FORBID_OVERWRITE, symlinkArgs.ForbidOverwrite)
+		}
+
+		if len(symlinkArgs.StorageClass) != 0 {
+			if !validStorageClass(symlinkArgs.StorageClass) {
+				return bce.NewBceClientError("invalid storage class value: " + symlinkArgs.StorageClass)
+			}
+			if symlinkArgs.StorageClass == STORAGE_CLASS_ARCHIVE {
+				return bce.NewBceClientError("the archive storage class is not supported for symlinks")
+			}
+			req.SetHeader(http.BCE_STORAGE_CLASS, symlinkArgs.StorageClass)
+		}
+
+		if len(symlinkArgs.UserMeta) != 0 {
+			if err := setUserMetadata(req, symlinkArgs.UserMeta); err != nil {
+				return err
+			}
+		}
+	}
+	req.SetHeader(http.BCE_SYMLINK_TARGET, object)
+
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return err
+	}
+	if resp.IsFail() {
+		return resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return nil
+}
+
+// GetObjectSymlink - get the target object name of the given symlink
+//
+// PARAMS:
+//     - cli: the client agent which can perform sending request
+//     - bucket: the bucket name of the object
+//     - symlinkKey: the name of the symlink
+// RETURNS:
+//     - string: the name of the target object
+//     - error: nil if ok otherwise the specific error
+func GetObjectSymlink(cli bce.Client, bucket string, symlinkKey string) (string, error) {
+	req := &bce.BceRequest{}
+	req.SetUri(getObjectUri(bucket, symlinkKey))
+	req.SetParam("symlink", "")
+	req.SetMethod(http.GET)
+	resp := &bce.BceResponse{}
+	if err := SendRequest(cli, req, resp); err != nil {
+		return "", err
+	}
+	if resp.IsFail() {
+		return "", resp.ServiceError()
+	}
+	defer func() { resp.Body().Close() }()
+	return resp.Header(http.BCE_SYMLINK_TARGET), nil
+}
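+
+// symlinkRoundTripSketch is an illustrative sketch, not part of the upstream
+// SDK: it creates a symlink named link pointing at target, then reads the
+// target name back through the symlink.
+func symlinkRoundTripSketch(cli bce.Client, bucket, target, link string) (string, error) {
+	if err := PutObjectSymlink(cli, bucket, target, link, nil); err != nil {
+		return "", err
+	}
+	return GetObjectSymlink(cli, bucket, link)
+}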
+ */
+
+// util.go - define the utilities for api package of BOS service
+
+package api
+
+import (
+    "bytes"
+    net_http "net/http"
+    "strings"
+
+    "github.com/baidubce/bce-sdk-go/bce"
+    "github.com/baidubce/bce-sdk-go/http"
+)
+
+const (
+    METADATA_DIRECTIVE_COPY    = "copy"
+    METADATA_DIRECTIVE_REPLACE = "replace"
+
+    STORAGE_CLASS_STANDARD    = "STANDARD"
+    STORAGE_CLASS_STANDARD_IA = "STANDARD_IA"
+    STORAGE_CLASS_COLD        = "COLD"
+    STORAGE_CLASS_ARCHIVE     = "ARCHIVE"
+
+    FETCH_MODE_SYNC  = "sync"
+    FETCH_MODE_ASYNC = "async"
+
+    CANNED_ACL_PRIVATE           = "private"
+    CANNED_ACL_PUBLIC_READ       = "public-read"
+    CANNED_ACL_PUBLIC_READ_WRITE = "public-read-write"
+
+    RAW_CONTENT_TYPE = "application/octet-stream"
+
+    THRESHOLD_100_CONTINUE = 1 << 20 // add the 100-continue header if the body size is bigger than 1MB
+
+    STATUS_ENABLED  = "enabled"
+    STATUS_DISABLED = "disabled"
+
+    ENCRYPTION_AES256 = "AES256"
+
+    RESTORE_TIER_STANDARD  = "Standard"  // standard-tier restore of the object
+    RESTORE_TIER_EXPEDITED = "Expedited" // expedited-tier restore of the object
+
+    FORBID_OVERWRITE_FALSE = "false"
+    FORBID_OVERWRITE_TRUE  = "true"
+)
+
+var DEFAULT_CNAME_LIKE_LIST = []string{
+    ".cdn.bcebos.com",
+}
+
+var VALID_STORAGE_CLASS_TYPE = map[string]int{
+    STORAGE_CLASS_STANDARD:    0,
+    STORAGE_CLASS_STANDARD_IA: 1,
+    STORAGE_CLASS_COLD:        2,
+    STORAGE_CLASS_ARCHIVE:     3,
+}
+
+var VALID_RESTORE_TIER = map[string]int{
+    RESTORE_TIER_STANDARD:  1,
+    RESTORE_TIER_EXPEDITED: 1,
+}
+
+var VALID_FORBID_OVERWRITE = map[string]int{
+    FORBID_OVERWRITE_FALSE: 1,
+    FORBID_OVERWRITE_TRUE:  1,
+}
+
+var (
+    GET_OBJECT_ALLOWED_RESPONSE_HEADERS = map[string]struct{}{
+        "ContentDisposition": {},
+        "ContentType":        {},
+        "ContentLanguage":    {},
+        "Expires":            {},
+        "CacheControl":       {},
+        "ContentEncoding":    {},
+    }
+)
+
+func getBucketUri(bucketName string) string {
+    return bce.URI_PREFIX + bucketName
+}
+
+func getObjectUri(bucketName, objectName string) string {
+    return bce.URI_PREFIX + bucketName + "/" + objectName
+}
+
+func getCnameUri(uri string) string {
+    if len(uri) <= 0 {
+        return uri
+    }
+    slash_index := strings.Index(uri[1:], "/")
+    if slash_index == -1 {
+        return bce.URI_PREFIX
+    } else {
+        return uri[slash_index+1:]
+    }
+}
+
+func validMetadataDirective(val string) bool {
+    if val == METADATA_DIRECTIVE_COPY || val == METADATA_DIRECTIVE_REPLACE {
+        return true
+    }
+    return false
+}
+
+func validForbidOverwrite(val string) bool {
+    if _, ok := VALID_FORBID_OVERWRITE[val]; ok {
+        return true
+    }
+    return false
+}
+
+func validStorageClass(val string) bool {
+    if _, ok := VALID_STORAGE_CLASS_TYPE[val]; ok {
+        return true
+    }
+    return false
+}
+
+func validFetchMode(val string) bool {
+    if val == FETCH_MODE_SYNC || val == FETCH_MODE_ASYNC {
+        return true
+    }
+    return false
+}
+
+func validCannedAcl(val string) bool {
+    if val == CANNED_ACL_PRIVATE ||
+        val == CANNED_ACL_PUBLIC_READ ||
+        val == CANNED_ACL_PUBLIC_READ_WRITE {
+        return true
+    }
+    return false
+}
+
+func toHttpHeaderKey(key string) string {
+    var result bytes.Buffer
+    needToUpper := true
+    for _, c := range []byte(key) {
+        if needToUpper && (c >= 'a' && c <= 'z') {
+            result.WriteByte(c - 32)
+            needToUpper = false
+        } else if c == '-' {
+            result.WriteByte(c)
+            needToUpper = true
+        } else {
+            result.WriteByte(c)
+        }
+    }
+    return result.String()
+}
+
+func setOptionalNullHeaders(req *bce.BceRequest, args map[string]string) {
+    for k, v := range args {
+        if len(v) == 0 {
+            continue
+        }
+        switch k {
+        case http.CACHE_CONTROL:
+            fallthrough
+        case http.CONTENT_DISPOSITION:
+            fallthrough
+        case http.CONTENT_ENCODING:
+            fallthrough
+        case http.CONTENT_RANGE:
+            fallthrough
+        case http.CONTENT_MD5:
+            fallthrough
+        case http.CONTENT_TYPE:
+            fallthrough
+        case http.EXPIRES:
+            fallthrough
+        case http.LAST_MODIFIED:
+            fallthrough
+        case http.ETAG:
+            fallthrough
+        case http.BCE_OBJECT_TYPE:
+            fallthrough
+        case http.BCE_NEXT_APPEND_OFFSET:
+            fallthrough
+        case http.BCE_CONTENT_SHA256:
+            fallthrough
+        case http.BCE_CONTENT_CRC32:
+            fallthrough
+        case http.BCE_COPY_SOURCE_RANGE:
+            fallthrough
+        case http.BCE_COPY_SOURCE_IF_MATCH:
+            fallthrough
+        case http.BCE_COPY_SOURCE_IF_NONE_MATCH:
+            fallthrough
+        case http.BCE_COPY_SOURCE_IF_MODIFIED_SINCE:
+            fallthrough
+        case http.BCE_COPY_SOURCE_IF_UNMODIFIED_SINCE:
+            req.SetHeader(k, v)
+        }
+    }
+}
+
+func setUserMetadata(req *bce.BceRequest, meta map[string]string) error {
+    if meta == nil {
+        return nil
+    }
+    for k, v := range meta {
+        if len(k) == 0 {
+            continue
+        }
+        if len(k)+len(v) > 32*1024 {
+            return bce.NewBceClientError("MetadataTooLarge")
+        }
+        req.SetHeader(http.BCE_USER_METADATA_PREFIX+k, v)
+    }
+    return nil
+}
+
+func isCnameLikeHost(host string) bool {
+    for _, suffix := range DEFAULT_CNAME_LIKE_LIST {
+        if strings.HasSuffix(strings.ToLower(host), suffix) {
+            return true
+        }
+    }
+    return false
+}
+
+func SendRequest(cli bce.Client, req *bce.BceRequest, resp *bce.BceResponse) error {
+    var (
+        err        error
+        need_retry bool
+    )
+
+    req.SetEndpoint(cli.GetBceClientConfig().Endpoint)
+    origin_uri := req.Uri()
+    // set uri for cname or cdn endpoint
+    if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().Endpoint) {
+        req.SetUri(getCnameUri(origin_uri))
+    }
+
+    if err = cli.SendRequest(req, resp); err != nil {
+        if serviceErr, isServiceErr := err.(*bce.BceServiceError); isServiceErr {
+            if serviceErr.StatusCode == net_http.StatusInternalServerError ||
+                serviceErr.StatusCode == net_http.StatusBadGateway ||
+                serviceErr.StatusCode == net_http.StatusServiceUnavailable ||
+                (serviceErr.StatusCode == net_http.StatusBadRequest && serviceErr.Code == "Http400") {
+                need_retry = true
+            }
+        }
+        if _, isClientErr := err.(*bce.BceClientError); isClientErr {
+            need_retry = true
+        }
+        // retry backup endpoint
+        if need_retry && cli.GetBceClientConfig().BackupEndpoint != "" {
+            req.SetEndpoint(cli.GetBceClientConfig().BackupEndpoint)
+            if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().BackupEndpoint) {
+                req.SetUri(getCnameUri(origin_uri))
+            }
+            if err = cli.SendRequest(req, resp); err != nil {
+                return err
+            }
+        }
+    }
+    return err
+}
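+
+// Example (editorial sketch, not part of the upstream source): SendRequest is
+// the low-level helper every API function in this package builds on; a hedged,
+// minimal GET of an object's acl resource, assuming a configured bce.Client
+// `cli`, mirrors the pattern used by GetObjectAcl above:
+//
+//    req := &bce.BceRequest{}
+//    req.SetUri(getObjectUri("example-bucket", "example-object"))
+//    req.SetMethod(http.GET)
+//    req.SetParam("acl", "")
+//    resp := &bce.BceResponse{}
+//    if err := SendRequest(cli, req, resp); err != nil {
+//        // transport or service error (after the optional backup-endpoint retry)
+//    }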
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
new file mode 100644
index 0000000..fb0591f
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
@@ -0,0 +1,2188 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// client.go - define the client for BOS service
+
+// Package bos defines the BOS services of BCE. The supported APIs are all defined in sub-package
+// model with three types: 16 bucket APIs, 9 object APIs and 7 multipart APIs.
+package bos
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "net/http"
+    "os"
+
+    "github.com/baidubce/bce-sdk-go/auth"
+    "github.com/baidubce/bce-sdk-go/bce"
+    "github.com/baidubce/bce-sdk-go/services/bos/api"
+    "github.com/baidubce/bce-sdk-go/util/log"
+)
+
+const (
+    DEFAULT_SERVICE_DOMAIN = bce.DEFAULT_REGION + ".bcebos.com"
+    DEFAULT_MAX_PARALLEL   = 10
+    MULTIPART_ALIGN        = 1 << 20         // 1MB
+    MIN_MULTIPART_SIZE     = 100 * (1 << 10) // 100 KB
+    DEFAULT_MULTIPART_SIZE = 12 * (1 << 20)  // 12MB
+
+    MAX_PART_NUMBER        = 10000
+    MAX_SINGLE_PART_SIZE   = 5 * (1 << 30)    // 5GB
+    MAX_SINGLE_OBJECT_SIZE = 48.8 * (1 << 40) // 48.8TB
+)
+
+// Client of BOS service is a kind of BceClient, so derived from BceClient
+type Client struct {
+    *bce.BceClient
+
+    // Fields used in the parallel operations of the BOS service
+    MaxParallel   int64
+    MultipartSize int64
+}
+
+// BosClientConfiguration defines the client configuration structure provided by the user.
+type BosClientConfiguration struct {
+    Ak               string
+    Sk               string
+    Endpoint         string
+    RedirectDisabled bool
+}
+
+// NewClient makes the BOS service client with default configuration.
+// Use `cli.Config.xxx` to access the config or change it to non-default value.
+func NewClient(ak, sk, endpoint string) (*Client, error) {
+    return NewClientWithConfig(&BosClientConfiguration{
+        Ak:               ak,
+        Sk:               sk,
+        Endpoint:         endpoint,
+        RedirectDisabled: false,
+    })
+}
+func NewClientWithConfig(config *BosClientConfiguration) (*Client, error) {
+    var credentials *auth.BceCredentials
+    var err error
+    ak, sk, endpoint := config.Ak, config.Sk, config.Endpoint
+    if len(ak) == 0 && len(sk) == 0 { // to support public-read-write request
+        credentials, err = nil, nil
+    } else {
+        credentials, err = auth.NewBceCredentials(ak, sk)
+        if err != nil {
+            return nil, err
+        }
+    }
+    if len(endpoint) == 0 {
+        endpoint = DEFAULT_SERVICE_DOMAIN
+    }
+    defaultSignOptions := &auth.SignOptions{
+        HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN,
+        ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS}
+    defaultConf := &bce.BceClientConfiguration{
+        Endpoint:                  endpoint,
+        Region:                    bce.DEFAULT_REGION,
+        UserAgent:                 bce.DEFAULT_USER_AGENT,
+        Credentials:               credentials,
+        SignOption:                defaultSignOptions,
+        Retry:                     bce.DEFAULT_RETRY_POLICY,
+        ConnectionTimeoutInMillis: bce.DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS,
+        RedirectDisabled:          config.RedirectDisabled}
+    v1Signer := &auth.BceV1Signer{}
+
+    client := &Client{bce.NewBceClient(defaultConf, v1Signer),
+        DEFAULT_MAX_PARALLEL, DEFAULT_MULTIPART_SIZE}
+    return client, nil
+}
+
+// ListBuckets - list all buckets
+//
+// RETURNS:
+//    - *api.ListBucketsResult: the all buckets
+//    - error: the return error if any occurs
+func (c *Client) ListBuckets() (*api.ListBucketsResult, error) {
+    return api.ListBuckets(c)
+}
+
+// ListObjects - list all objects of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - args: the optional arguments to list objects
+// RETURNS:
+//    - *api.ListObjectsResult: the all objects of the bucket
+//    - error: the return error if any occurs
+func (c *Client) ListObjects(bucket string,
+    args *api.ListObjectsArgs) (*api.ListObjectsResult, error) {
+    return api.ListObjects(c, bucket, args)
+}
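+
+// Example (editorial sketch, not part of the upstream source): a hedged use of
+// ListObjects with a prefix filter; the ListObjectsArgs field names follow the
+// positional literal used by SimpleListObjects below, and the result's
+// Contents/Key fields are assumed from the API result type:
+//
+//    res, err := cli.ListObjects("example-bucket", &api.ListObjectsArgs{Prefix: "logs/", MaxKeys: 100})
+//    if err == nil {
+//        for _, obj := range res.Contents {
+//            fmt.Println(obj.Key)
+//        }
+//    }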
+
+// SimpleListObjects - list all objects of the given bucket with simple arguments
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - prefix: the prefix for listing
+//    - maxKeys: the max number of result objects
+//    - marker: the marker to mark the beginning for the listing
+//    - delimiter: the delimiter for list objects
+// RETURNS:
+//    - *api.ListObjectsResult: the all objects of the bucket
+//    - error: the return error if any occurs
+func (c *Client) SimpleListObjects(bucket, prefix string, maxKeys int, marker,
+    delimiter string) (*api.ListObjectsResult, error) {
+    args := &api.ListObjectsArgs{delimiter, marker, maxKeys, prefix}
+    return api.ListObjects(c, bucket, args)
+}
+
+// HeadBucket - test whether the given bucket exists and the requester has access authority
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if it exists and is accessible, otherwise the specific error
+func (c *Client) HeadBucket(bucket string) error {
+    return api.HeadBucket(c, bucket)
+}
+
+// DoesBucketExist - test whether the given bucket exists or not
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - bool: true if it exists, false if it does not exist or an error occurs
+//    - error: nil whether the bucket exists or not, otherwise the specific error
+func (c *Client) DoesBucketExist(bucket string) (bool, error) {
+    err := api.HeadBucket(c, bucket)
+    if err == nil {
+        return true, nil
+    }
+    if realErr, ok := err.(*bce.BceServiceError); ok {
+        if realErr.StatusCode == http.StatusForbidden {
+            return true, nil
+        }
+        if realErr.StatusCode == http.StatusNotFound {
+            return false, nil
+        }
+    }
+    return false, err
+}
+
+// PutBucket - create a new bucket
+//
+// PARAMS:
+//    - bucket: the new bucket name
+// RETURNS:
+//    - string: the location of the new bucket if create success
+//    - error: nil if create success otherwise the specific error
+func (c *Client) PutBucket(bucket string) (string, error) {
+    return api.PutBucket(c, bucket)
+}
+
+// DeleteBucket - delete an empty bucket
+//
+// PARAMS:
+//    - bucket: the bucket name to be deleted
+// RETURNS:
+//    - error: nil if delete success otherwise the specific error
+func (c *Client) DeleteBucket(bucket string) error {
+    return api.DeleteBucket(c, bucket)
+}
+
+// GetBucketLocation - get the location of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - string: the location of the bucket
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketLocation(bucket string) (string, error) {
+    return api.GetBucketLocation(c, bucket)
+}
+
+// PutBucketAcl - set the acl of the given bucket with acl body stream
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - aclBody: the acl json body stream
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketAcl(bucket string, aclBody *bce.Body) error {
+    return api.PutBucketAcl(c, bucket, "", aclBody)
+}
+
+// PutBucketAclFromCanned - set the canned acl of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - cannedAcl: the cannedAcl string
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketAclFromCanned(bucket, cannedAcl string) error {
+    return api.PutBucketAcl(c, bucket, cannedAcl, nil)
+}
+
+// PutBucketAclFromFile - set the acl of the given bucket with acl json file name
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - aclFile: the acl file name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketAclFromFile(bucket, aclFile string) error {
+    body, err := bce.NewBodyFromFile(aclFile)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketAcl(c, bucket, "", body)
+}
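+
+// Example (editorial sketch, not part of the upstream source): granting public
+// read access with one of the canned ACL strings defined in the api sub-package;
+// the bucket name is hypothetical:
+//
+//    if err := cli.PutBucketAclFromCanned("example-bucket", api.CANNED_ACL_PUBLIC_READ); err != nil {
+//        // handle error
+//    }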
+
+// PutBucketAclFromString - set the acl of the given bucket with acl json string
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - aclString: the acl string with json format
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketAclFromString(bucket, aclString string) error {
+    body, err := bce.NewBodyFromString(aclString)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketAcl(c, bucket, "", body)
+}
+
+// PutBucketAclFromStruct - set the acl of the given bucket with acl data structure
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - aclObj: the acl struct object
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketAclFromStruct(bucket string, aclObj *api.PutBucketAclArgs) error {
+    jsonBytes, jsonErr := json.Marshal(aclObj)
+    if jsonErr != nil {
+        return jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketAcl(c, bucket, "", body)
+}
+
+// GetBucketAcl - get the acl of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - *api.GetBucketAclResult: the result of the bucket acl
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketAcl(bucket string) (*api.GetBucketAclResult, error) {
+    return api.GetBucketAcl(c, bucket)
+}
+
+// PutBucketLogging - set the logging setting of the given bucket with json stream
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - body: the json body
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLogging(bucket string, body *bce.Body) error {
+    return api.PutBucketLogging(c, bucket, body)
+}
+
+// PutBucketLoggingFromString - set the logging setting of the given bucket with json string
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - logging: the json format string
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLoggingFromString(bucket, logging string) error {
+    body, err := bce.NewBodyFromString(logging)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketLogging(c, bucket, body)
+}
+
+// PutBucketLoggingFromStruct - set the logging setting of the given bucket with args object
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - obj: the logging setting object
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketLoggingFromStruct(bucket string, obj *api.PutBucketLoggingArgs) error {
+    jsonBytes, jsonErr := json.Marshal(obj)
+    if jsonErr != nil {
+        return jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketLogging(c, bucket, body)
+}
+
+// GetBucketLogging - get the logging setting of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - *api.GetBucketLoggingResult: the logging setting of the bucket
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketLogging(bucket string) (*api.GetBucketLoggingResult, error) {
+    return api.GetBucketLogging(c, bucket)
+}
+
+// DeleteBucketLogging - delete the logging setting of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketLogging(bucket string) error {
+    return api.DeleteBucketLogging(c, bucket)
+}
+
+// PutBucketLifecycle - set the lifecycle rule of the given bucket with raw stream
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - lifecycle: the lifecycle rule json body
+// RETURNS:
+//    - error: nil if success otherwise the specific
error +func (c *Client) PutBucketLifecycle(bucket string, lifecycle *bce.Body) error { + return api.PutBucketLifecycle(c, bucket, lifecycle) +} + +// PutBucketLifecycleFromString - set the lifecycle rule of the given bucket with string +// +// PARAMS: +// - bucket: the bucket name +// - lifecycle: the lifecycle rule json format string body +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketLifecycleFromString(bucket, lifecycle string) error { + body, err := bce.NewBodyFromString(lifecycle) + if err != nil { + return err + } + return api.PutBucketLifecycle(c, bucket, body) +} + +// GetBucketLifecycle - get the lifecycle rule of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - *api.GetBucketLifecycleResult: the lifecycle rule of the bucket +// - error: nil if success otherwise the specific error +func (c *Client) GetBucketLifecycle(bucket string) (*api.GetBucketLifecycleResult, error) { + return api.GetBucketLifecycle(c, bucket) +} + +// DeleteBucketLifecycle - delete the lifecycle rule of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) DeleteBucketLifecycle(bucket string) error { + return api.DeleteBucketLifecycle(c, bucket) +} + +// PutBucketStorageclass - set the storage class of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// - storageClass: the storage class string value +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketStorageclass(bucket, storageClass string) error { + return api.PutBucketStorageclass(c, bucket, storageClass) +} + +// GetBucketStorageclass - get the storage class of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - string: the storage class string value +// - error: nil if success otherwise the specific error +func (c *Client) GetBucketStorageclass(bucket string) (string, error) { + return api.GetBucketStorageclass(c, bucket) +} + +// PutBucketReplication - set the bucket replication config of different region +// +// PARAMS: +// - bucket: the bucket name +// - replicationConf: the replication config json body stream +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketReplication(bucket string, replicationConf *bce.Body, replicationRuleId string) error { + return api.PutBucketReplication(c, bucket, replicationConf, replicationRuleId) +} + +// PutBucketReplicationFromFile - set the bucket replication config with json file name +// +// PARAMS: +// - bucket: the bucket name +// - confFile: the config json file name +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketReplicationFromFile(bucket, confFile string, replicationRuleId string) error { + body, err := bce.NewBodyFromFile(confFile) + if err != nil { + return err + } + return api.PutBucketReplication(c, bucket, body, replicationRuleId) +} + +// PutBucketReplicationFromString - set the bucket replication config with json string +// +// PARAMS: +// - bucket: the bucket name +// - confString: the config string with json format +// - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) 
PutBucketReplicationFromString(bucket, confString string, replicationRuleId string) error {
+    body, err := bce.NewBodyFromString(confString)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketReplication(c, bucket, body, replicationRuleId)
+}
+
+// PutBucketReplicationFromStruct - set the bucket replication config with struct
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - confObj: the replication config struct object
+//    - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketReplicationFromStruct(bucket string,
+    confObj *api.PutBucketReplicationArgs, replicationRuleId string) error {
+    jsonBytes, jsonErr := json.Marshal(confObj)
+    if jsonErr != nil {
+        return jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketReplication(c, bucket, body, replicationRuleId)
+}
+
+// GetBucketReplication - get the bucket replication config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//    - *api.GetBucketReplicationResult: the result of the bucket replication config
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketReplication(bucket string, replicationRuleId string) (*api.GetBucketReplicationResult, error) {
+    return api.GetBucketReplication(c, bucket, replicationRuleId)
+}
+
+// ListBucketReplication - get all replication config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - *api.ListBucketReplicationResult: the list of the bucket replication config
+//    - error: nil if success otherwise the specific error
+func (c *Client) ListBucketReplication(bucket string) (*api.ListBucketReplicationResult, error) {
+    return api.ListBucketReplication(c, bucket)
+}
+
+// DeleteBucketReplication - delete the bucket replication config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketReplication(bucket string, replicationRuleId string) error {
+    return api.DeleteBucketReplication(c, bucket, replicationRuleId)
+}
+
+// GetBucketReplicationProgress - get the bucket replication progress of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -]
+// RETURNS:
+//    - *api.GetBucketReplicationProgressResult: the progress of the bucket replication
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketReplicationProgress(bucket string, replicationRuleId string) (
+    *api.GetBucketReplicationProgressResult, error) {
+    return api.GetBucketReplicationProgress(c, bucket, replicationRuleId)
+}
+
+// PutBucketEncryption - set the bucket encryption config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - algorithm: the encryption algorithm name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketEncryption(bucket, algorithm string) error {
+    return api.PutBucketEncryption(c, bucket, algorithm)
+}
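+
+// Example (editorial sketch, not part of the upstream source): enabling
+// server-side encryption with the AES256 algorithm constant defined in the api
+// sub-package; the bucket name is hypothetical:
+//
+//    if err := cli.PutBucketEncryption("example-bucket", api.ENCRYPTION_AES256); err != nil {
+//        // handle error
+//    }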
+
+// GetBucketEncryption - get the bucket encryption config
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - string: the encryption algorithm name
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketEncryption(bucket string) (string, error) {
+    return api.GetBucketEncryption(c, bucket)
+}
+
+// DeleteBucketEncryption - delete the bucket encryption config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketEncryption(bucket string) error {
+    return api.DeleteBucketEncryption(c, bucket)
+}
+
+// PutBucketStaticWebsite - set the bucket static website config
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - config: the static website config body stream
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsite(bucket string, config *bce.Body) error {
+    return api.PutBucketStaticWebsite(c, bucket, config)
+}
+
+// PutBucketStaticWebsiteFromString - set the bucket static website config from json string
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - jsonConfig: the static website config json string
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsiteFromString(bucket, jsonConfig string) error {
+    body, err := bce.NewBodyFromString(jsonConfig)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketStaticWebsite(c, bucket, body)
+}
+
+// PutBucketStaticWebsiteFromStruct - set the bucket static website config from struct
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - confObj: the static website config object
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketStaticWebsiteFromStruct(bucket string,
+    confObj *api.PutBucketStaticWebsiteArgs) error {
+    jsonBytes, jsonErr := json.Marshal(confObj)
+    if jsonErr != nil {
+        return jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketStaticWebsite(c, bucket, body)
+}
+
+// SimplePutBucketStaticWebsite - set the bucket static website config with simple arguments
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - index: the static website config for index file name
+//    - notFound: the static website config for notFound file name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) SimplePutBucketStaticWebsite(bucket, index, notFound string) error {
+    confObj := &api.PutBucketStaticWebsiteArgs{index, notFound}
+    return c.PutBucketStaticWebsiteFromStruct(bucket, confObj)
+}
+
+// GetBucketStaticWebsite - get the bucket static website config
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - result: the static website config result object
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketStaticWebsite(bucket string) (
+    *api.GetBucketStaticWebsiteResult, error) {
+    return api.GetBucketStaticWebsite(c, bucket)
+}
+
+// DeleteBucketStaticWebsite - delete the bucket static website config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketStaticWebsite(bucket string) error {
+    return api.DeleteBucketStaticWebsite(c, bucket)
+}
+
+// PutBucketCors - set the bucket CORS config
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - config: the bucket CORS config body stream
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCors(bucket string, config *bce.Body) error {
+    return api.PutBucketCors(c, bucket, config)
+}
+
+// PutBucketCorsFromFile - set the bucket CORS config from json config file
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - filename: the bucket CORS json config file name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromFile(bucket, filename string) error {
+    body, err := bce.NewBodyFromFile(filename)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketCors(c, bucket, body)
+}
+
+// PutBucketCorsFromString - set the bucket CORS config from json config string
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - jsonConfig: the bucket CORS json config string
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromString(bucket, jsonConfig string) error {
+    body, err := bce.NewBodyFromString(jsonConfig)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketCors(c, bucket, body)
+}
+
+// PutBucketCorsFromStruct - set the bucket CORS config from json config object
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - confObj: the bucket CORS json config object
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCorsFromStruct(bucket string, confObj *api.PutBucketCorsArgs) error {
+    jsonBytes, jsonErr := json.Marshal(confObj)
+    if jsonErr != nil {
+        return jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return err
+    }
+    return api.PutBucketCors(c, bucket, body)
+}
+
+// GetBucketCors - get the bucket CORS config
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - result: the bucket CORS config result object
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketCors(bucket string) (*api.GetBucketCorsResult, error) {
+    return api.GetBucketCors(c, bucket)
+}
+
+// DeleteBucketCors - delete the bucket CORS config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketCors(bucket string) error {
+    return api.DeleteBucketCors(c, bucket)
+}
+
+// PutBucketCopyrightProtection - set the copyright protection config of the given bucket
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - resources: the resource items in the bucket to be protected
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) PutBucketCopyrightProtection(bucket string, resources ...string) error {
+    return api.PutBucketCopyrightProtection(c, bucket, resources...)
+}
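+
+// Example (editorial sketch, not part of the upstream source): protecting two
+// hypothetical resource patterns of a bucket; the exact resource format is an
+// assumption and should be checked against the BOS documentation:
+//
+//    err := cli.PutBucketCopyrightProtection("example-bucket",
+//        "example-bucket/movie/*", "example-bucket/music/*")
+//    if err != nil {
+//        // handle error
+//    }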
+
+// GetBucketCopyrightProtection - get the bucket copyright protection config
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - result: the bucket copyright protection config resources
+//    - error: nil if success otherwise the specific error
+func (c *Client) GetBucketCopyrightProtection(bucket string) ([]string, error) {
+    return api.GetBucketCopyrightProtection(c, bucket)
+}
+
+// DeleteBucketCopyrightProtection - delete the bucket copyright protection config
+//
+// PARAMS:
+//    - bucket: the bucket name
+// RETURNS:
+//    - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketCopyrightProtection(bucket string) error {
+    return api.DeleteBucketCopyrightProtection(c, bucket)
+}
+
+// PutObject - upload a new object or rewrite the existing object with raw stream
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - body: the object content body
+//    - args: the optional arguments
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) PutObject(bucket, object string, body *bce.Body,
+    args *api.PutObjectArgs) (string, error) {
+    return api.PutObject(c, bucket, object, body, args)
+}
+
+// BasicPutObject - the basic interface of uploading an object
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - body: the object content body
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) BasicPutObject(bucket, object string, body *bce.Body) (string, error) {
+    return api.PutObject(c, bucket, object, body, nil)
+}
+
+// PutObjectFromBytes - upload a new object or rewrite the existing object from a byte array
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - bytesArr: the content byte array
+//    - args: the optional arguments
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromBytes(bucket, object string, bytesArr []byte,
+    args *api.PutObjectArgs) (string, error) {
+    body, err := bce.NewBodyFromBytes(bytesArr)
+    if err != nil {
+        return "", err
+    }
+    return api.PutObject(c, bucket, object, body, args)
+}
+
+// PutObjectFromString - upload a new object or rewrite the existing object from a string
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - content: the content string
+//    - args: the optional arguments
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromString(bucket, object, content string,
+    args *api.PutObjectArgs) (string, error) {
+    body, err := bce.NewBodyFromString(content)
+    if err != nil {
+        return "", err
+    }
+    return api.PutObject(c, bucket, object, body, args)
+}
+
+// PutObjectFromFile - upload a new object or rewrite the existing object from a local file
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - fileName: the local file full path name
+//    - args: the optional arguments
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromFile(bucket, object, fileName string,
+    args *api.PutObjectArgs) (string, error) {
+    body, err := bce.NewBodyFromFile(fileName)
+    if err != nil {
+        return "", err
+    }
+    return api.PutObject(c, bucket, object, body, args)
+}
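+
+// Example (editorial sketch, not part of the upstream source): uploading a
+// local file and reading back its ETag; the bucket, object and path names are
+// hypothetical:
+//
+//    etag, err := cli.PutObjectFromFile("example-bucket", "docs/readme.txt", "/tmp/readme.txt", nil)
+//    if err == nil {
+//        fmt.Println("uploaded, etag:", etag)
+//    }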
+
+// PutObjectFromStream - upload a new object or rewrite the existing object from stream
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store the object
+//    - object: the name of the object
+//    - reader: the content stream to upload
+//    - args: the optional arguments
+// RETURNS:
+//    - string: etag of the uploaded object
+//    - error: the uploaded error if any occurs
+func (c *Client) PutObjectFromStream(bucket, object string, reader io.Reader,
+    args *api.PutObjectArgs) (string, error) {
+    body, err := bce.NewBodyFromSizedReader(reader, -1)
+    if err != nil {
+        return "", err
+    }
+    return api.PutObject(c, bucket, object, body, args)
+}
+
+// CopyObject - copy a remote object to another one
+//
+// PARAMS:
+//    - bucket: the name of the destination bucket
+//    - object: the name of the destination object
+//    - srcBucket: the name of the source bucket
+//    - srcObject: the name of the source object
+//    - args: the optional arguments for copying object which are MetadataDirective, StorageClass,
+//      IfMatch, IfNoneMatch, ifModifiedSince, IfUnmodifiedSince
+// RETURNS:
+//    - *api.CopyObjectResult: result struct which contains "ETag" and "LastModified" fields
+//    - error: any error if it occurs
+func (c *Client) CopyObject(bucket, object, srcBucket, srcObject string,
+    args *api.CopyObjectArgs) (*api.CopyObjectResult, error) {
+    source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+    return api.CopyObject(c, bucket, object, source, args)
+}
+
+// BasicCopyObject - the basic interface of copying an object to another one
+//
+// PARAMS:
+//    - bucket: the name of the destination bucket
+//    - object: the name of the destination object
+//    - srcBucket: the name of the source bucket
+//    - srcObject: the name of the source object
+// RETURNS:
+//    - *api.CopyObjectResult: result struct which contains "ETag" and "LastModified" fields
+//    - error: any error if it occurs
+func (c *Client) BasicCopyObject(bucket, object, srcBucket,
+    srcObject string) (*api.CopyObjectResult, error) {
+    source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+    return api.CopyObject(c, bucket, object, source, nil)
+}
+
+// GetObject - get the given object with raw stream return
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - responseHeaders: the optional response headers to get the given object
+//    - ranges: the optional range start and end to get the given object
+// RETURNS:
+//    - *api.GetObjectResult: result struct which contains "Body" and header fields
+//      for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3
+//    - error: any error if it occurs
+func (c *Client) GetObject(bucket, object string, responseHeaders map[string]string,
+    ranges ...int64) (*api.GetObjectResult, error) {
+    return api.GetObject(c, bucket, object, responseHeaders, ranges...)
+}
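+
+// Example (editorial sketch, not part of the upstream source): a ranged read of
+// the first kilobyte of an object through the variadic range arguments; names
+// are hypothetical and error handling is abbreviated:
+//
+//    res, err := cli.GetObject("example-bucket", "example-object", nil, 0, 1023)
+//    if err == nil {
+//        defer res.Body.Close()
+//        data, _ := io.ReadAll(res.Body)
+//        fmt.Println(len(data))
+//    }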
+
+// BasicGetObject - the basic interface of getting the given object
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+// RETURNS:
+//    - *api.GetObjectResult: result struct which contains "Body" and header fields
+//      for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3
+//    - error: any error if it occurs
+func (c *Client) BasicGetObject(bucket, object string) (*api.GetObjectResult, error) {
+    return api.GetObject(c, bucket, object, nil)
+}
+
+// BasicGetObjectToFile - use basic interface to get the given object to the given file path
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - filePath: the file path to store the object content
+// RETURNS:
+//    - error: any error if it occurs
+func (c *Client) BasicGetObjectToFile(bucket, object, filePath string) error {
+    res, err := api.GetObject(c, bucket, object, nil)
+    if err != nil {
+        return err
+    }
+    defer res.Body.Close()
+
+    file, fileErr := os.OpenFile(filePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+    if fileErr != nil {
+        return fileErr
+    }
+    defer file.Close()
+
+    written, writeErr := io.CopyN(file, res.Body, res.ContentLength)
+    if writeErr != nil {
+        return writeErr
+    }
+    if written != res.ContentLength {
+        return fmt.Errorf("written content size does not match the response content")
+    }
+    return nil
+}
+
+// GetObjectMeta - get the given object metadata
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+// RETURNS:
+//    - *api.GetObjectMetaResult: metadata result, for details reference
+//      https://cloud.baidu.com/doc/BOS/API.html#GetObjectMeta.E6.8E.A5.E5.8F.A3
+//    - error: any error if it occurs
+func (c *Client) GetObjectMeta(bucket, object string) (*api.GetObjectMetaResult, error) {
+    return api.GetObjectMeta(c, bucket, object)
+}
+
+// SelectObject - select the object content
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - args: the optional arguments to select the object
+// RETURNS:
+//    - *api.SelectObjectResult: select object result
+//    - error: any error if it occurs
+func (c *Client) SelectObject(bucket, object string, args *api.SelectObjectArgs) (*api.SelectObjectResult, error) {
+    return api.SelectObject(c, bucket, object, args)
+}
+
+// FetchObject - fetch the object content from the given source and store
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store
+//    - object: the name of the object to store
+//    - source: fetch source url
+//    - args: the optional arguments to fetch the object
+// RETURNS:
+//    - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//    - error: any error if it occurs
+func (c *Client) FetchObject(bucket, object, source string,
+    args *api.FetchObjectArgs) (*api.FetchObjectResult, error) {
+    return api.FetchObject(c, bucket, object, source, args)
+}
+
+// BasicFetchObject - the basic interface of the fetch object api
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store
+//    - object: the name of the object to store
+//    - source: fetch source url
+// RETURNS:
+//    - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//    - error: any error if it occurs
+func (c *Client) BasicFetchObject(bucket, object, source string) (*api.FetchObjectResult, error) {
+    return api.FetchObject(c, bucket, object, source, nil)
+}
+
+// SimpleFetchObject - fetch object with simple arguments interface
+//
+// PARAMS:
+//    - bucket: the name of the bucket to store
+//    - object: the name of the object to store
+//    - source: fetch source url
+//    - mode: fetch mode which supports sync and async
+//    - storageClass: the storage class of the fetched object
+// RETURNS:
+//    - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields
+//    - error: any error if it occurs
+func (c *Client) SimpleFetchObject(bucket, object, source, mode,
+    storageClass string) (*api.FetchObjectResult, error) {
+    args := &api.FetchObjectArgs{mode, storageClass}
+    return api.FetchObject(c, bucket, object, source, args)
+}
+
+// AppendObject - append the given content to a new or existing appendable object
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - content: the append object stream
+//    - args: the optional arguments to append object
+// RETURNS:
+//    - *api.AppendObjectResult: the result of the appended object
+//    - error: any error if it occurs
+func (c *Client) AppendObject(bucket, object string, content *bce.Body,
+    args *api.AppendObjectArgs) (*api.AppendObjectResult, error) {
+    return api.AppendObject(c, bucket, object, content, args)
+}
+
+// SimpleAppendObject - the interface to append object with simple offset argument
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - content: the append object stream
+//    - offset: the offset of where to append
+// RETURNS:
+//    - *api.AppendObjectResult: the result of the appended object
+//    - error: any error if it occurs
+func (c *Client) SimpleAppendObject(bucket, object string, content *bce.Body,
+    offset int64) (*api.AppendObjectResult, error) {
+    return api.AppendObject(c, bucket, object, content, &api.AppendObjectArgs{Offset: offset})
+}
+
+// SimpleAppendObjectFromString - the simple interface of appending an object from a string
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - content: the object string to append
+//    - offset: the offset of where to append
+// RETURNS:
+//    - *api.AppendObjectResult: the result of the appended object
+//    - error: any error if it occurs
+func (c *Client) SimpleAppendObjectFromString(bucket, object, content string,
+    offset int64) (*api.AppendObjectResult, error) {
+    body, err := bce.NewBodyFromString(content)
+    if err != nil {
+        return nil, err
+    }
+    return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset})
+}
+
+// SimpleAppendObjectFromFile - the simple interface of appending an object from a file
+//
+// PARAMS:
+//    - bucket: the name of the bucket
+//    - object: the name of the object
+//    - filePath: the full file path
+//    - offset: the offset of where to append
+// RETURNS:
+//    - *api.AppendObjectResult: the result of the appended object
+//    - error: any error if it occurs
+func (c *Client) SimpleAppendObjectFromFile(bucket, object, filePath string,
+    offset int64) (*api.AppendObjectResult, error) {
+    body, err := bce.NewBodyFromFile(filePath)
+    if err != nil {
+        return nil, err
+    }
+    return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset})
+}
+
+// DeleteObject - delete the given object
+//
+// PARAMS:
+//    - bucket: the name of the bucket to delete
+//    - object: the name of the object to delete
+// RETURNS:
+//    - error: any error if it occurs
+func (c *Client) DeleteObject(bucket, object string) error {
+    return api.DeleteObject(c, bucket, object)
+}
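+
+// Example (editorial sketch, not part of the upstream source): appending two
+// string chunks to an appendable object; the NextAppendOffset result field is
+// assumed from the x-bce-next-append-offset header handled in util.go:
+//
+//    res, err := cli.SimpleAppendObjectFromString("example-bucket", "log-object", "first chunk", 0)
+//    if err == nil {
+//        _, err = cli.SimpleAppendObjectFromString("example-bucket", "log-object",
+//            "second chunk", res.NextAppendOffset)
+//    }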
+
+// DeleteMultipleObjects - delete a list of objects
+//
+// PARAMS:
+//    - bucket: the name of the bucket to delete
+//    - objectListStream: the object list stream to be deleted
+// RETURNS:
+//    - *api.DeleteMultipleObjectsResult: the delete information
+//    - error: any error if it occurs
+func (c *Client) DeleteMultipleObjects(bucket string,
+    objectListStream *bce.Body) (*api.DeleteMultipleObjectsResult, error) {
+    return api.DeleteMultipleObjects(c, bucket, objectListStream)
+}
+
+// DeleteMultipleObjectsFromString - delete a list of objects with json format string
+//
+// PARAMS:
+//    - bucket: the name of the bucket to delete
+//    - objectListString: the object list string to be deleted
+// RETURNS:
+//    - *api.DeleteMultipleObjectsResult: the delete information
+//    - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromString(bucket,
+    objectListString string) (*api.DeleteMultipleObjectsResult, error) {
+    body, err := bce.NewBodyFromString(objectListString)
+    if err != nil {
+        return nil, err
+    }
+    return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// DeleteMultipleObjectsFromStruct - delete a list of objects with object list struct
+//
+// PARAMS:
+//    - bucket: the name of the bucket to delete
+//    - objectListStruct: the object list struct to be deleted
+// RETURNS:
+//    - *api.DeleteMultipleObjectsResult: the delete information
+//    - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromStruct(bucket string,
+    objectListStruct *api.DeleteMultipleObjectsArgs) (*api.DeleteMultipleObjectsResult, error) {
+    jsonBytes, jsonErr := json.Marshal(objectListStruct)
+    if jsonErr != nil {
+        return nil, jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return nil, err
+    }
+    return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// DeleteMultipleObjectsFromKeyList - delete a list of objects with given key string array
+//
+// PARAMS:
+//    - bucket: the name of the bucket to delete
+//    - keyList: the key string list to be deleted
+// RETURNS:
+//    - *api.DeleteMultipleObjectsResult: the delete information
+//    - error: any error if it occurs
+func (c *Client) DeleteMultipleObjectsFromKeyList(bucket string,
+    keyList []string) (*api.DeleteMultipleObjectsResult, error) {
+    if len(keyList) == 0 {
+        return nil, fmt.Errorf("the key list to be deleted is empty")
+    }
+    args := make([]api.DeleteObjectArgs, len(keyList))
+    for i, k := range keyList {
+        args[i].Key = k
+    }
+    argsContainer := &api.DeleteMultipleObjectsArgs{args}
+
+    jsonBytes, jsonErr := json.Marshal(argsContainer)
+    if jsonErr != nil {
+        return nil, jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return nil, err
+    }
+    return api.DeleteMultipleObjects(c, bucket, body)
+}
+
+// InitiateMultipartUpload - initiate a multipart upload to get an upload ID
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - object: the object name
+//    - contentType: the content type of the object to be uploaded which should be specified,
+//      otherwise use the default(application/octet-stream)
+//    - args: the optional arguments
+// RETURNS:
+//    - *InitiateMultipartUploadResult: the result data structure
+//    - error: nil if ok otherwise the specific error
+func (c *Client) InitiateMultipartUpload(bucket, object, contentType string,
+    args *api.InitiateMultipartUploadArgs) (*api.InitiateMultipartUploadResult, error) {
+    return api.InitiateMultipartUpload(c, bucket, object, contentType, args)
+}
+
+// BasicInitiateMultipartUpload - basic interface to initiate a multipart upload
+//
+// PARAMS:
+//    - bucket: the bucket name
+//    - 
object: the object name +// RETURNS: +// - *InitiateMultipartUploadResult: the result data structure +// - error: nil if ok otherwise the specific error +func (c *Client) BasicInitiateMultipartUpload(bucket, + object string) (*api.InitiateMultipartUploadResult, error) { + return api.InitiateMultipartUpload(c, bucket, object, "", nil) +} + +// UploadPart - upload the single part in the multipart upload process +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// - args: the optional arguments +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func (c *Client) UploadPart(bucket, object, uploadId string, partNumber int, + content *bce.Body, args *api.UploadPartArgs) (string, error) { + return api.UploadPart(c, bucket, object, uploadId, partNumber, content, args) +} + +// BasicUploadPart - basic interface to upload the single part in the multipart upload process +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func (c *Client) BasicUploadPart(bucket, object, uploadId string, partNumber int, + content *bce.Body) (string, error) { + return api.UploadPart(c, bucket, object, uploadId, partNumber, content, nil) +} + +// UploadPartFromBytes - upload the single part in the multipart upload process +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - content: the uploaded part content +// - args: the optional arguments +// RETURNS: +// - string: the etag of the uploaded part +// - error: nil if ok otherwise the specific error +func (c *Client) UploadPartFromBytes(bucket, object, uploadId string, partNumber int, + content []byte, args *api.UploadPartArgs) (string, error) { + return api.UploadPartFromBytes(c, bucket, object, uploadId, partNumber, content, args) +} + +// UploadPartCopy - copy the multipart object +// +// PARAMS: +// - bucket: the destination bucket name +// - object: the destination object name +// - srcBucket: the source bucket +// - srcObject: the source object +// - uploadId: the multipart upload id +// - partNumber: the current part number +// - args: the optional arguments +// RETURNS: +// - *CopyObjectResult: the lastModified and eTag of the part +// - error: nil if ok otherwise the specific error +func (c *Client) UploadPartCopy(bucket, object, srcBucket, srcObject, uploadId string, + partNumber int, args *api.UploadPartCopyArgs) (*api.CopyObjectResult, error) { + source := fmt.Sprintf("/%s/%s", srcBucket, srcObject) + return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args) +} + +// BasicUploadPartCopy - basic interface to copy the multipart object +// +// PARAMS: +// - bucket: the destination bucket name +// - object: the destination object name +// - srcBucket: the source bucket +// - srcObject: the source object +// - uploadId: the multipart upload id +// - partNumber: the current part number +// RETURNS: +// - *CopyObjectResult: the lastModified and eTag of the part +// - error: nil if ok otherwise the specific error +func (c *Client) BasicUploadPartCopy(bucket, object, srcBucket, 
srcObject, uploadId string,
+    partNumber int) (*api.CopyObjectResult, error) {
+    source := fmt.Sprintf("/%s/%s", srcBucket, srcObject)
+    return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, nil)
+}
+
+// CompleteMultipartUpload - finish a multipart upload operation with parts stream
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - uploadId: the multipart upload id
+//    - body: all parts info stream
+//    - args: the optional arguments such as user defined meta data
+// RETURNS:
+//    - *CompleteMultipartUploadResult: the result data
+//    - error: nil if ok otherwise the specific error
+func (c *Client) CompleteMultipartUpload(bucket, object, uploadId string,
+    body *bce.Body, args *api.CompleteMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+    return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args)
+}
+
+// CompleteMultipartUploadFromStruct - finish a multipart upload operation with parts struct
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - uploadId: the multipart upload id
+//    - args: args info struct object
+// RETURNS:
+//    - *CompleteMultipartUploadResult: the result data
+//    - error: nil if ok otherwise the specific error
+func (c *Client) CompleteMultipartUploadFromStruct(bucket, object, uploadId string,
+    args *api.CompleteMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+    jsonBytes, jsonErr := json.Marshal(args)
+    if jsonErr != nil {
+        return nil, jsonErr
+    }
+    body, err := bce.NewBodyFromBytes(jsonBytes)
+    if err != nil {
+        return nil, err
+    }
+    return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args)
+}
+
+// AbortMultipartUpload - abort a multipart upload operation
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - uploadId: the multipart upload id
+// RETURNS:
+//    - error: nil if ok otherwise the specific error
+func (c *Client) AbortMultipartUpload(bucket, object, uploadId string) error {
+    return api.AbortMultipartUpload(c, bucket, object, uploadId)
+}
+
+// ListParts - list the successfully uploaded parts info by upload id
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - uploadId: the multipart upload id
+//    - args: the optional arguments
+// RETURNS:
+//    - *ListPartsResult: the uploaded parts info result
+//    - error: nil if ok otherwise the specific error
+func (c *Client) ListParts(bucket, object, uploadId string,
+    args *api.ListPartsArgs) (*api.ListPartsResult, error) {
+    return api.ListParts(c, bucket, object, uploadId, args)
+}
+
+// BasicListParts - basic interface to list the successfully uploaded parts info by upload id
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - uploadId: the multipart upload id
+// RETURNS:
+//    - *ListPartsResult: the uploaded parts info result
+//    - error: nil if ok otherwise the specific error
+func (c *Client) BasicListParts(bucket, object, uploadId string) (*api.ListPartsResult, error) {
+    return api.ListParts(c, bucket, object, uploadId, nil)
+}
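+
+// Example (editorial sketch, not part of the upstream source): the minimal
+// three-step multipart flow through the wrappers above; `partBody` is a
+// hypothetical *bce.Body prepared by the caller, and the UploadInfoType fields
+// follow the positional literal used in UploadSuperFile below:
+//
+//    init, err := cli.BasicInitiateMultipartUpload("example-bucket", "big-object")
+//    etag, err := cli.BasicUploadPart("example-bucket", "big-object", init.UploadId, 1, partBody)
+//    parts := &api.CompleteMultipartUploadArgs{Parts: []api.UploadInfoType{{PartNumber: 1, ETag: etag}}}
+//    _, err = cli.CompleteMultipartUploadFromStruct("example-bucket", "big-object", init.UploadId, parts)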
+
+// ListMultipartUploads - list the unfinished uploaded parts of the given bucket
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - args: the optional arguments
+// RETURNS:
+//    - *ListMultipartUploadsResult: the unfinished uploaded parts info result
+//    - error: nil if ok otherwise the specific error
+func (c *Client) ListMultipartUploads(bucket string,
+    args *api.ListMultipartUploadsArgs) (*api.ListMultipartUploadsResult, error) {
+    return api.ListMultipartUploads(c, bucket, args)
+}
+
+// BasicListMultipartUploads - basic interface to list the unfinished uploaded parts
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+// RETURNS:
+//    - *ListMultipartUploadsResult: the unfinished uploaded parts info result
+//    - error: nil if ok otherwise the specific error
+func (c *Client) BasicListMultipartUploads(bucket string) (
+    *api.ListMultipartUploadsResult, error) {
+    return api.ListMultipartUploads(c, bucket, nil)
+}
+
+// UploadSuperFile - parallel upload the super file by using the multipart upload interface
+//
+// PARAMS:
+//    - bucket: the destination bucket name
+//    - object: the destination object name
+//    - fileName: the local full path filename of the super file
+//    - storageClass: the storage class to be set to the uploaded file
+// RETURNS:
+//    - error: nil if ok otherwise the specific error
+func (c *Client) UploadSuperFile(bucket, object, fileName, storageClass string) error {
+    // Get the file size and check the size for multipart upload
+    file, fileErr := os.Open(fileName)
+    if fileErr != nil {
+        return fileErr
+    }
+    oldTimeout := c.Config.ConnectionTimeoutInMillis
+    c.Config.ConnectionTimeoutInMillis = 0
+    defer func() {
+        c.Config.ConnectionTimeoutInMillis = oldTimeout
+        file.Close()
+    }()
+    fileInfo, infoErr := file.Stat()
+    if infoErr != nil {
+        return infoErr
+    }
+    size := fileInfo.Size()
+    if size < MIN_MULTIPART_SIZE || c.MultipartSize < MIN_MULTIPART_SIZE {
+        return bce.NewBceClientError("multipart size should not be less than 100KB")
+    }
+
+    // Calculate part size and total part number
+    partSize := (c.MultipartSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+    partNum := (size + partSize - 1) / partSize
+    if partNum > MAX_PART_NUMBER {
+        partSize = (size + MAX_PART_NUMBER - 1) / MAX_PART_NUMBER
+        partSize = (partSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+        partNum = (size + partSize - 1) / partSize
+    }
+    log.Debugf("starting upload super file, total parts: %d, part size: %d", partNum, partSize)
+
+    // Inner wrapper function of parallel uploading each part to get the ETag of the part
+    uploadPart := func(bucket, object, uploadId string, partNumber int, body *bce.Body,
+        result chan *api.UploadInfoType, ret chan error, id int64, pool chan int64) {
+        etag, err := c.BasicUploadPart(bucket, object, uploadId, partNumber, body)
+        if err != nil {
+            result <- nil
+            ret <- err
+        } else {
+            result <- &api.UploadInfoType{partNumber, etag}
+        }
+        pool <- id
+    }
+
+    // Do the parallel multipart upload
+    resp, err := c.InitiateMultipartUpload(bucket, object, "",
+        &api.InitiateMultipartUploadArgs{StorageClass: storageClass})
+    if err != nil {
+        return err
+    }
+    uploadId := resp.UploadId
+    uploadedResult := make(chan *api.UploadInfoType, partNum)
+    retChan := make(chan error, partNum)
+    workerPool := make(chan int64, c.MaxParallel)
+    for i := int64(0); i < c.MaxParallel; i++ {
+        workerPool <- i
+    }
+    for partId := int64(1); partId <= partNum; partId++ {
+        uploadSize := partSize
+        offset := (partId - 1) * partSize
+        left := size - offset
+        if uploadSize > left {
+            uploadSize = left
+        }
+        partBody, _ := bce.NewBodyFromSectionFile(file, offset, uploadSize)
+        select { // wait until a worker is available to upload this part
+        case workerId := <-workerPool:
+            go uploadPart(bucket, object, uploadId, int(partId), partBody,
+                uploadedResult, retChan, workerId, workerPool)
uploadPartErr := <-retChan:
+ c.AbortMultipartUpload(bucket, object, uploadId)
+ return uploadPartErr
+ }
+ }
+
+ // Check the return of each part uploading, and decide to complete or abort it
+ completeArgs := &api.CompleteMultipartUploadArgs{
+ Parts: make([]api.UploadInfoType, partNum),
+ }
+ for i := partNum; i > 0; i-- {
+ uploaded := <-uploadedResult
+ if uploaded == nil { // an error occurred and was not caught in the `select` statement
+ c.AbortMultipartUpload(bucket, object, uploadId)
+ return <-retChan
+ }
+ completeArgs.Parts[uploaded.PartNumber-1] = *uploaded
+ log.Debugf("upload part %d success, etag: %s", uploaded.PartNumber, uploaded.ETag)
+ }
+ if _, err := c.CompleteMultipartUploadFromStruct(bucket, object,
+ uploadId, completeArgs); err != nil {
+ c.AbortMultipartUpload(bucket, object, uploadId)
+ return err
+ }
+ return nil
+}
+
+// DownloadSuperFile - parallel download the super file using the get object with range
+//
+// PARAMS:
+//     - bucket: the destination bucket name
+//     - object: the destination object name
+//     - fileName: the local full path filename to store the object
+// RETURNS:
+//     - error: nil if ok otherwise the specific error
+func (c *Client) DownloadSuperFile(bucket, object, fileName string) (err error) {
+ file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ oldTimeout := c.Config.ConnectionTimeoutInMillis
+ c.Config.ConnectionTimeoutInMillis = 0
+ defer func() {
+ c.Config.ConnectionTimeoutInMillis = oldTimeout
+ file.Close()
+ if err != nil {
+ os.Remove(fileName)
+ }
+ }()
+
+ meta, err := c.GetObjectMeta(bucket, object)
+ if err != nil {
+ return
+ }
+ size := meta.ContentLength
+ partSize := (c.MultipartSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+ partNum := (size + partSize - 1) / partSize
+ log.Debugf("starting download super file, total parts: %d, part size: %d", partNum, partSize)
+
+ doneChan := make(chan struct{}, partNum)
+ abortChan := make(chan struct{})
+
+ // Set up multiple goroutine workers to download the object
+ workerPool := make(chan int64, c.MaxParallel)
+ for i := int64(0); i < c.MaxParallel; i++ {
+ workerPool <- i
+ }
+ for i := int64(0); i < partNum; i++ {
+ rangeStart := i * partSize
+ rangeEnd := (i+1)*partSize - 1
+ if rangeEnd > size-1 {
+ rangeEnd = size - 1
+ }
+ select {
+ case workerId := <-workerPool:
+ go func(rangeStart, rangeEnd, workerId int64) {
+ res, rangeGetErr := c.GetObject(bucket, object, nil, rangeStart, rangeEnd)
+ if rangeGetErr != nil {
+ // NOTE: `res` may be nil when the range get fails, so log the
+ // requested range instead of dereferencing the response
+ log.Errorf("download object part(offset:%d, size:%d) failed: %v",
+ rangeStart, rangeEnd-rangeStart+1, rangeGetErr)
+ abortChan <- struct{}{}
+ err = rangeGetErr
+ return
+ }
+ defer res.Body.Close()
+ log.Debugf("writing part %d with offset=%d, size=%d", rangeStart/partSize,
+ rangeStart, res.ContentLength)
+ buf := make([]byte, 4096)
+ offset := rangeStart
+ for {
+ n, e := res.Body.Read(buf)
+ if e != nil && e != io.EOF {
+ abortChan <- struct{}{}
+ err = e
+ return
+ }
+ if n == 0 {
+ break
+ }
+ if _, writeErr := file.WriteAt(buf[:n], offset); writeErr != nil {
+ abortChan <- struct{}{}
+ err = writeErr
+ return
+ }
+ offset += int64(n)
+ }
+ log.Debugf("writing part %d done", rangeStart/partSize)
+ workerPool <- workerId
+ doneChan <- struct{}{}
+ }(rangeStart, rangeEnd, workerId)
+ case <-abortChan: // abort range get if error occurs during downloading any part
+ return
+ }
+ }
+
+ // Wait for writing to local file done
+ for i := partNum; i > 0; i-- {
+ <-doneChan
+ }
+ return nil
+}
+
+// 
GeneratePresignedUrl - generate an authorization url with expire time and optional arguments +// +// PARAMS: +// - bucket: the target bucket name +// - object: the target object name +// - expireInSeconds: the expire time in seconds of the signed url +// - method: optional sign method, default is GET +// - headers: optional sign headers, default just set the Host +// - params: optional sign params, default is empty +// RETURNS: +// - string: the presigned url with authorization string +func (c *Client) GeneratePresignedUrl(bucket, object string, expireInSeconds int, method string, + headers, params map[string]string) string { + return api.GeneratePresignedUrl(c.Config, c.Signer, bucket, object, + expireInSeconds, method, headers, params) +} + +func (c *Client) GeneratePresignedUrlPathStyle(bucket, object string, expireInSeconds int, method string, + headers, params map[string]string) string { + return api.GeneratePresignedUrlPathStyle(c.Config, c.Signer, bucket, object, + expireInSeconds, method, headers, params) +} + +// BasicGeneratePresignedUrl - basic interface to generate an authorization url with expire time +// +// PARAMS: +// - bucket: the target bucket name +// - object: the target object name +// - expireInSeconds: the expire time in seconds of the signed url +// RETURNS: +// - string: the presigned url with authorization string +func (c *Client) BasicGeneratePresignedUrl(bucket, object string, expireInSeconds int) string { + return api.GeneratePresignedUrl(c.Config, c.Signer, bucket, object, + expireInSeconds, "", nil, nil) +} + +// PutObjectAcl - set the ACL of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - aclBody: the acl json body stream +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAcl(bucket, object string, aclBody *bce.Body) error { + return api.PutObjectAcl(c, bucket, object, "", nil, nil, aclBody) +} + +// PutObjectAclFromCanned - set the canned acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - cannedAcl: the cannedAcl string +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromCanned(bucket, object, cannedAcl string) error { + return api.PutObjectAcl(c, bucket, object, cannedAcl, nil, nil, nil) +} + +// PutObjectAclGrantRead - set the canned grant read acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - ids: the user id list to grant read for this object +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclGrantRead(bucket, object string, ids ...string) error { + return api.PutObjectAcl(c, bucket, object, "", ids, nil, nil) +} + +// PutObjectAclGrantFullControl - set the canned grant full-control acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - ids: the user id list to grant full-control for this object +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclGrantFullControl(bucket, object string, ids ...string) error { + return api.PutObjectAcl(c, bucket, object, "", nil, ids, nil) +} + +// PutObjectAclFromFile - set the acl of the given object with acl json file name +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - aclFile: the acl file name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) 
PutObjectAclFromFile(bucket, object, aclFile string) error { + body, err := bce.NewBodyFromFile(aclFile) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// PutObjectAclFromString - set the acl of the given object with acl json string +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - aclString: the acl string with json format +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromString(bucket, object, aclString string) error { + body, err := bce.NewBodyFromString(aclString) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// PutObjectAclFromStruct - set the acl of the given object with acl data structure +// +// PARAMS: +// - bucket: the bucket name +// - aclObj: the acl struct object +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutObjectAclFromStruct(bucket, object string, aclObj *api.PutObjectAclArgs) error { + jsonBytes, jsonErr := json.Marshal(aclObj) + if jsonErr != nil { + return jsonErr + } + body, err := bce.NewBodyFromBytes(jsonBytes) + if err != nil { + return err + } + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) +} + +// GetObjectAcl - get the acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - *api.GetObjectAclResult: the result of the object acl +// - error: nil if success otherwise the specific error +func (c *Client) GetObjectAcl(bucket, object string) (*api.GetObjectAclResult, error) { + return api.GetObjectAcl(c, bucket, object) +} + +// DeleteObjectAcl - delete the acl of the given object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) DeleteObjectAcl(bucket, object string) error { + return api.DeleteObjectAcl(c, bucket, object) +} + +// RestoreObject - restore the archive object +// +// PARAMS: +// - bucket: the bucket name +// - object: the object name +// - restoreDays: the effective time of restore +// - restoreTier: the tier of restore +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) RestoreObject(bucket string, object string, restoreDays int, restoreTier string) error { + if _, ok := api.VALID_RESTORE_TIER[restoreTier]; !ok { + return errors.New("invalid restore tier") + } + + if restoreDays <= 0 { + return errors.New("invalid restore days") + } + + args := api.ArchiveRestoreArgs{ + RestoreTier: restoreTier, + RestoreDays: restoreDays, + } + return api.RestoreObject(c, bucket, object, args) +} + +// PutBucketTrash - put the bucket trash +// +// PARAMS: +// - bucket: the bucket name +// - trashReq: the trash request +// RETURNS: +// - error: nil if success otherwise the specific error +func (c *Client) PutBucketTrash(bucket string, trashReq api.PutBucketTrashReq) error { + return api.PutBucketTrash(c, bucket, trashReq) +} + +// GetBucketTrash - get the bucket trash +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - *api.GetBucketTrashResult,: the result of the bucket trash +// - error: nil if success otherwise the specific error +func (c *Client) GetBucketTrash(bucket string) (*api.GetBucketTrashResult, error) { + return api.GetBucketTrash(c, bucket) +} + +// DeleteBucketTrash - delete the trash of the given bucket +// +// PARAMS: +// - bucket: the bucket name +// RETURNS: +// - 
error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketTrash(bucket string) error {
+ return api.DeleteBucketTrash(c, bucket)
+}
+
+// PutBucketNotification - put the bucket notification
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - putBucketNotificationReq: the bucket notification request
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) PutBucketNotification(bucket string, putBucketNotificationReq api.PutBucketNotificationReq) error {
+ return api.PutBucketNotification(c, bucket, putBucketNotificationReq)
+}
+
+// GetBucketNotification - get the bucket notification
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - *api.PutBucketNotificationReq: the result of the bucket notification
+//     - error: nil if success otherwise the specific error
+func (c *Client) GetBucketNotification(bucket string) (*api.PutBucketNotificationReq, error) {
+ return api.GetBucketNotification(c, bucket)
+}
+
+// DeleteBucketNotification - delete the notification of the given bucket
+//
+// PARAMS:
+//     - bucket: the bucket name
+// RETURNS:
+//     - error: nil if success otherwise the specific error
+func (c *Client) DeleteBucketNotification(bucket string) error {
+ return api.DeleteBucketNotification(c, bucket)
+}
+
+// ParallelUpload - upload an object automatically using the multipart upload interface
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - filename: the local file name
+//     - contentType: the content type, default is application/octet-stream
+//     - args: the optional initiate multipart upload args, nil to use the defaults
+// RETURNS:
+//     - *api.CompleteMultipartUploadResult: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) ParallelUpload(bucket string, object string, filename string, contentType string, args *api.InitiateMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) {
+
+ initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, bucket, object, contentType, args)
+ if err != nil {
+ return nil, err
+ }
+
+ partEtags, err := c.parallelPartUpload(bucket, object, filename, initiateMultipartUploadResult.UploadId)
+ if err != nil {
+ c.AbortMultipartUpload(bucket, object, initiateMultipartUploadResult.UploadId)
+ return nil, err
+ }
+
+ completeMultipartUploadResult, err := c.CompleteMultipartUploadFromStruct(bucket, object, initiateMultipartUploadResult.UploadId, &api.CompleteMultipartUploadArgs{Parts: partEtags})
+ if err != nil {
+ c.AbortMultipartUpload(bucket, object, initiateMultipartUploadResult.UploadId)
+ return nil, err
+ }
+ return completeMultipartUploadResult, nil
+}
+
+// parallelPartUpload - upload all parts of a local file in parallel
+//
+// PARAMS:
+//     - bucket: the bucket name
+//     - object: the object name
+//     - filename: the local file name
+//     - uploadId: the uploadId
+// RETURNS:
+//     - []api.UploadInfoType: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) parallelPartUpload(bucket string, object string, filename string, uploadId string) ([]api.UploadInfoType, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ // Align the part size to MULTIPART_ALIGN (1MB)
+ partSize := (c.MultipartSize +
+ MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+
+ // Get the file size and compute the part count; at most MAX_PART_NUMBER (10000) parts are allowed
+ fileInfo, _ := file.Stat()
+ fileSize := fileInfo.Size()
+ partNum := (fileSize + partSize - 1) / partSize
+ if partNum > MAX_PART_NUMBER { // too many parts, enlarge the part size
+ partSize = (fileSize + MAX_PART_NUMBER - 1) / MAX_PART_NUMBER
+ partSize = (partSize + MULTIPART_ALIGN - 1) / MULTIPART_ALIGN * MULTIPART_ALIGN
+ partNum = (fileSize + partSize - 1) / partSize
+ }
+
+ parallelChan := make(chan int, c.MaxParallel)
+
+ errChan := make(chan error, c.MaxParallel)
+
+ resultChan := make(chan api.UploadInfoType, partNum)
+
+ // Upload the parts one by one, bounded by the parallelism channel
+ for i := int64(1); i <= partNum; i++ {
+ // Compute the offset and the upload size of this part
+ uploadSize := partSize
+ offset := partSize * (i - 1)
+ left := fileSize - offset
+ if left < partSize {
+ uploadSize = left
+ }
+
+ // Create a stream over the file section at the given offset and size
+ partBody, _ := bce.NewBodyFromSectionFile(file, offset, uploadSize)
+
+ select {
+ case err = <-errChan:
+ return nil, err
+ default:
+ select {
+ case err = <-errChan:
+ return nil, err
+ case parallelChan <- 1:
+ go c.singlePartUpload(bucket, object, uploadId, int(i), partBody, parallelChan, errChan, resultChan)
+ }
+
+ }
+ }
+
+ partEtags := make([]api.UploadInfoType, partNum)
+ for i := int64(0); i < partNum; i++ {
+ select {
+ case err := <-errChan:
+ return nil, err
+ case result := <-resultChan:
+ partEtags[result.PartNumber-1].PartNumber = result.PartNumber
+ partEtags[result.PartNumber-1].ETag = result.ETag
+ }
+ }
+ return partEtags, nil
+}
+
+// singlePartUpload - upload a single part
+//
+// PARAMS:
+//     - parallelChan: the parallelism-limiting channel
+//     - errChan: the error chan
+//     - result: the upload result chan
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the uploadId
+//     - partNumber: the part number of the object
+//     - content: the content of current part
+func (c *Client) singlePartUpload(
+ bucket string, object string, uploadId string,
+ partNumber int, content *bce.Body,
+ parallelChan chan int, errChan chan error, result chan api.UploadInfoType) {
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Fatal("singlePartUpload recovered in f:", r)
+ errChan <- errors.New("singlePartUpload panic")
+ }
+ <-parallelChan
+ }()
+
+ var args api.UploadPartArgs
+ args.ContentMD5 = content.ContentMD5()
+
+ etag, err := api.UploadPart(c, bucket, object, uploadId, partNumber, content, &args)
+ if err != nil {
+ errChan <- err
+ log.Errorf("upload part failed, err: %v", err)
+ return
+ }
+ result <- api.UploadInfoType{PartNumber: partNumber, ETag: etag}
+ return
+}
+
+// ParallelCopy - copy an object automatically using the multipart copy interface
+//
+// PARAMS:
+//     - srcBucketName: the src bucket name
+//     - srcObjectName: the src object name
+//     - destBucketName: the dest bucket name
+//     - destObjectName: the dest object name
+//     - args: the copy args
+//     - srcClient: the src region client
+// RETURNS:
+//     - *api.CompleteMultipartUploadResult: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) ParallelCopy(srcBucketName string, srcObjectName string,
+ destBucketName string, destObjectName string,
+ args *api.MultiCopyObjectArgs, srcClient *Client) (*api.CompleteMultipartUploadResult, error) {
+
+ if srcClient == nil {
+ srcClient = c
+ }
+ objectMeta, err := srcClient.GetObjectMeta(srcBucketName, srcObjectName)
+ if err != nil {
+ return nil, err
+ }
+
+ initArgs := api.InitiateMultipartUploadArgs{
+ CacheControl: objectMeta.CacheControl,
+ ContentDisposition: objectMeta.ContentDisposition,
+ Expires: objectMeta.Expires,
+ StorageClass: objectMeta.StorageClass,
+ }
+ if args != nil {
+ if len(args.StorageClass) != 0 {
+ initArgs.StorageClass = args.StorageClass
+ }
+ }
+ initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, destBucketName, destObjectName, objectMeta.ContentType, &initArgs)
+
+ if err != nil {
+ return nil, err
+ }
+
+ source := fmt.Sprintf("/%s/%s", srcBucketName, srcObjectName)
+ partEtags, err := c.parallelPartCopy(*objectMeta, source, destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+
+ if err != nil {
+ c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+ return nil, err
+ }
+
+ completeMultipartUploadResult, err := c.CompleteMultipartUploadFromStruct(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId, &api.CompleteMultipartUploadArgs{Parts: partEtags})
+ if err != nil {
+ c.AbortMultipartUpload(destBucketName, destObjectName, initiateMultipartUploadResult.UploadId)
+ return nil, err
+ }
+ return completeMultipartUploadResult, nil
+}
+
+// parallelPartCopy - copy all parts of the source object in parallel
+//
+// PARAMS:
+//     - srcMeta: the copy source object meta
+//     - source: the copy source
+//     - bucket: the dest bucket name
+//     - object: the dest object name
+//     - uploadId: the uploadId
+// RETURNS:
+//     - []api.UploadInfoType: multipart upload result
+//     - error: nil if success otherwise the specific error
+func (c *Client) parallelPartCopy(srcMeta api.GetObjectMetaResult, source string, bucket string, object string, uploadId string) ([]api.UploadInfoType, error) {
+ var err error
+ size := srcMeta.ContentLength
+ partSize := int64(DEFAULT_MULTIPART_SIZE)
+
+ partNum := (size + partSize - 1) / partSize
+
+ parallelChan := make(chan int, c.MaxParallel)
+
+ errChan := make(chan error, c.MaxParallel)
+
+ resultChan := make(chan api.UploadInfoType, partNum)
+
+ for i := int64(1); i <= partNum; i++ {
+ // Compute the offset and the copy size of this part
+ uploadSize := partSize
+ offset := partSize * (i - 1)
+ left := size - offset
+ if left < partSize {
+ uploadSize = left
+ }
+
+ partCopyArgs := api.UploadPartCopyArgs{
+ SourceRange: fmt.Sprintf("bytes=%d-%d", (i-1)*partSize, (i-1)*partSize+uploadSize-1),
+ IfMatch: srcMeta.ETag,
+ }
+
+ select {
+ case err = <-errChan:
+ return nil, err
+ default:
+ select {
+ case err = <-errChan:
+ return nil, err
+ case parallelChan <- 1:
+ go c.singlePartCopy(source, bucket, object, uploadId, int(i), &partCopyArgs, parallelChan, errChan, resultChan)
+ }
+
+ }
+ }
+
+ partEtags := make([]api.UploadInfoType, partNum)
+ for i := int64(0); i < partNum; i++ {
+ select {
+ case err := <-errChan:
+ return nil, err
+ case result := <-resultChan:
+ partEtags[result.PartNumber-1].PartNumber = result.PartNumber
+ partEtags[result.PartNumber-1].ETag = result.ETag
+ }
+ }
+ return partEtags, nil
+}
+
+// singlePartCopy - copy a single part
+//
+// PARAMS:
+//     - parallelChan: the parallelism-limiting channel
+//     - errChan: the error chan
+//     - result: the upload result chan
+//     - source: the copy source
+//     - bucket: the bucket name
+//     - object: the object name
+//     - uploadId: the uploadId
+//     - partNumber: the part number of the object
+//     - args: the copy args
+func (c *Client) singlePartCopy(source string, bucket string, object string, uploadId string,
+ partNumber int, args *api.UploadPartCopyArgs,
+ parallelChan chan int, errChan chan error, result chan api.UploadInfoType) {
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Fatal("singlePartCopy recovered in f:", r)
+ errChan <- errors.New("singlePartCopy panic")
+ }
+ <-parallelChan
+ }()
+
+ copyObjectResult, err := api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args)
+ if err != nil {
+ errChan <- err
+ log.Errorf("copy part failed, err: %v", err)
+ return
+ }
+ result <- api.UploadInfoType{PartNumber: partNumber, ETag: copyObjectResult.ETag}
+ return
+}
+
+// PutSymlink - create a symlink for an existing target object
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the object
+//     - symlinkKey: the name of the symlink
+//     - symlinkArgs: the optional arguments
+// RETURNS:
+//     - error: the put error if any occurs
+func (c *Client) PutSymlink(bucket string, object string, symlinkKey string, symlinkArgs *api.PutSymlinkArgs) error {
+ return api.PutObjectSymlink(c, bucket, object, symlinkKey, symlinkArgs)
+}
+
+// GetSymlink - get the target object name of the given symlink
+//
+// PARAMS:
+//     - bucket: the name of the bucket
+//     - object: the name of the symlink
+// RETURNS:
+//     - string: the target of the symlink
+//     - error: the get error if any occurs
+func (c *Client) GetSymlink(bucket string, object string) (string, error) {
+ return api.GetObjectSymlink(c, bucket, object)
+}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go b/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go
new file mode 100644
index 0000000..81f5498
--- /dev/null
+++ b/vendor/github.com/baidubce/bce-sdk-go/util/log/logger.go
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2017 Baidu, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ */
+
+// logger.go - defines the logger structure and methods
+
+// Package log implements the log facilities for BCE. It supports log to stderr, stdout as well as
+// log to file with rotating. It is safe to be called by multiple goroutines.
+// By using the package level function to use the default logger: +// log.SetLogHandler(log.STDOUT | log.FILE) // default is log to stdout +// log.SetLogDir("/tmp") // default is /tmp +// log.SetRotateType(log.ROTATE_DAY) // default is log.HOUR +// log.SetRotateSize(1 << 30) // default is 1GB +// log.SetLogLevel(log.INFO) // default is log.DEBUG +// log.Debug(1, 1.2, "a") +// log.Debugln(1, 1.2, "a") +// log.Debugf(1, 1.2, "a") +// User can also create new logger without using the default logger: +// customLogger := log.NewLogger() +// customLogger.SetLogHandler(log.FILE) +// customLogger.Debug(1, 1.2, "a") +// The log format can also support custom setting by using the following interface: +// log.SetLogFormat([]string{log.FMT_LEVEL, log.FMT_TIME, log.FMT_MSG}) +// Most of the cases just use the default format is enough: +// []string{FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG} +package log + +import ( + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +type Handler uint8 + +// Constants for log handler flags, default is STDOUT +const ( + NONE Handler = 0 + STDOUT Handler = 1 + STDERR Handler = 1 << 1 + FILE Handler = 1 << 2 +) + +type RotateStrategy uint8 + +// Constants for log rotating strategy when logging to file, default is by hour +const ( + ROTATE_NONE RotateStrategy = iota + ROTATE_DAY + ROTATE_HOUR + ROTATE_MINUTE + ROTATE_SIZE + + DEFAULT_ROTATE_TYPE = ROTATE_HOUR + DEFAULT_ROTATE_SIZE int64 = 1 << 30 + DEFAULT_LOG_DIR = "/tmp" + ROTATE_SIZE_FILE_PREFIX = "rotating" +) + +type Level uint8 + +// Constants for log levels, default is DEBUG +const ( + DEBUG Level = iota + INFO + WARN + ERROR + FATAL + PANIC +) + +var gLevelString = [...]string{"DEBUG", "INFO", "WARN", "ERROR", "FATAL", "PANIC"} + +// Constants of the log format components to support user custom specification +const ( + FMT_LEVEL = "level" + FMT_LTIME = "ltime" // long time with microsecond + FMT_TIME = "time" // just with second + FMT_LOCATION = "location" // caller's location with file, line, function + FMT_MSG = "msg" +) + +var ( + LOG_FMT_STR = map[string]string{ + FMT_LEVEL: "[%s]", + FMT_LTIME: "2006-01-02 15:04:05.000000", + FMT_TIME: "2006-01-02 15:04:05", + FMT_LOCATION: "%s:%d:%s:", + FMT_MSG: "%s", + } + gDefaultLogFormat = []string{FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG} +) + +type writerArgs struct { + record string + rotateArgs interface{} // used for rotating: the size of the record or the logging time +} + +// Logger defines the internal implementation of the log facility +type logger struct { + writers map[Handler]io.WriteCloser // the destination writer to log message + writerChan chan *writerArgs // the writer channal to pass each record and time or size + logFormat []string + levelThreshold Level + handler Handler + + // Fields that used when logging to file + logDir string + logFile string + rotateType RotateStrategy + rotateSize int64 + done chan bool +} + +func (l *logger) logging(level Level, format string, args ...interface{}) { + // Only log message that set the handler and is greater than or equal to the threshold + if l.handler == NONE || level < l.levelThreshold { + return + } + + // Generate the log record string and pass it to the writer channel + now := time.Now() + pc, file, line, ok, funcname := uintptr(0), "???", 0, true, "???" 
+ pc, file, line, ok = runtime.Caller(2) + if ok { + funcname = runtime.FuncForPC(pc).Name() + funcname = filepath.Ext(funcname) + funcname = strings.TrimPrefix(funcname, ".") + file = filepath.Base(file) + } + buf := make([]string, 0, len(l.logFormat)) + msg := fmt.Sprintf(format, args...) + for _, f := range l.logFormat { + if _, exists := LOG_FMT_STR[f]; !exists { // skip not supported part + continue + } + fmtStr := LOG_FMT_STR[f] + switch f { + case FMT_LEVEL: + buf = append(buf, fmt.Sprintf(fmtStr, gLevelString[level])) + case FMT_LTIME: + buf = append(buf, now.Format(fmtStr)) + case FMT_TIME: + buf = append(buf, now.Format(fmtStr)) + case FMT_LOCATION: + buf = append(buf, fmt.Sprintf(fmtStr, file, line, funcname)) + case FMT_MSG: + buf = append(buf, fmt.Sprintf(fmtStr, msg)) + } + } + record := strings.Join(buf, " ") + if l.rotateType == ROTATE_SIZE { + l.writerChan <- &writerArgs{record, int64(len(record))} + } else { + l.writerChan <- &writerArgs{record, now} + } + + // wait for current record done logging +} + +func (l *logger) buildWriter(args interface{}) { + if l.handler&STDOUT == STDOUT { + l.writers[STDOUT] = os.Stdout + } else { + delete(l.writers, STDOUT) + } + if l.handler&STDERR == STDERR { + l.writers[STDERR] = os.Stderr + } else { + delete(l.writers, STDERR) + } + if l.handler&FILE == FILE { + l.writers[FILE] = l.buildFileWriter(args) + } else { + delete(l.writers, FILE) + } +} + +func (l *logger) buildFileWriter(args interface{}) io.WriteCloser { + if l.handler&FILE != FILE { + return os.Stderr + } + + if len(l.logDir) == 0 { + l.logDir = DEFAULT_LOG_DIR + } + if l.rotateType < ROTATE_NONE || l.rotateType > ROTATE_SIZE { + l.rotateType = DEFAULT_ROTATE_TYPE + } + if l.rotateType == ROTATE_SIZE && l.rotateSize == 0 { + l.rotateSize = DEFAULT_ROTATE_SIZE + } + + logFile, needCreateFile := "", false + if l.rotateType == ROTATE_SIZE { + recordSize, _ := args.(int64) + logFile, needCreateFile = l.buildFileWriterBySize(recordSize) + } else { + recordTime, _ := args.(time.Time) + switch l.rotateType { + case ROTATE_NONE: + logFile = "default.log" + case ROTATE_DAY: + logFile = recordTime.Format("2006-01-02.log") + case ROTATE_HOUR: + logFile = recordTime.Format("2006-01-02_15.log") + case ROTATE_MINUTE: + logFile = recordTime.Format("2006-01-02_15-04.log") + } + if _, exist := getFileInfo(filepath.Join(l.logDir, logFile)); !exist { + needCreateFile = true + } + } + l.logFile = logFile + logFile = filepath.Join(l.logDir, l.logFile) + + // Should create new file + if needCreateFile { + if w, ok := l.writers[FILE]; ok { + w.Close() + } + if writer, err := os.Create(logFile); err == nil { + return writer + } else { + return os.Stderr + } + } + + // Already open the file + if w, ok := l.writers[FILE]; ok { + return w + } + + // Newly open the file + if writer, err := os.OpenFile(logFile, os.O_WRONLY|os.O_APPEND, 0666); err == nil { + return writer + } else { + return os.Stderr + } +} + +func (l *logger) buildFileWriterBySize(recordSize int64) (string, bool) { + logFile, needCreateFile := "", false + // First running the program and need to get filename by checking the existed files + if len(l.logFile) == 0 { + fname := fmt.Sprintf("%s-%s.0.log", ROTATE_SIZE_FILE_PREFIX, getSizeString(l.rotateSize)) + for { + size, exist := getFileInfo(filepath.Join(l.logDir, fname)) + if !exist { + logFile, needCreateFile = fname, true + break + } + if exist && size+recordSize <= l.rotateSize { + logFile, needCreateFile = fname, false + break + } + fname = getNextFileName(fname) + } + } else { // 
check the file size to append to the existed file or create a new file + currentFile := filepath.Join(l.logDir, l.logFile) + size, exist := getFileInfo(currentFile) + if !exist { + logFile, needCreateFile = l.logFile, true + } else { + if size+recordSize > l.rotateSize { // size exceeded + logFile, needCreateFile = getNextFileName(l.logFile), true + } else { + logFile, needCreateFile = l.logFile, false + } + } + } + return logFile, needCreateFile +} + +func (l *logger) SetHandler(h Handler) { l.handler = h } + +func (l *logger) SetLogDir(dir string) { l.logDir = dir } + +func (l *logger) SetLogLevel(level Level) { l.levelThreshold = level } + +func (l *logger) SetLogFormat(format []string) { l.logFormat = format } + +func (l *logger) SetRotateType(rotate RotateStrategy) { l.rotateType = rotate } + +func (l *logger) SetRotateSize(size int64) { l.rotateSize = size } + +func (l *logger) Debug(msg ...interface{}) { l.logging(DEBUG, "%s\n", concat(msg...)) } + +func (l *logger) Debugln(msg ...interface{}) { l.logging(DEBUG, "%s\n", concat(msg...)) } + +func (l *logger) Debugf(f string, msg ...interface{}) { l.logging(DEBUG, f+"\n", msg...) } + +func (l *logger) Info(msg ...interface{}) { l.logging(INFO, "%s\n", concat(msg...)) } + +func (l *logger) Infoln(msg ...interface{}) { l.logging(INFO, "%s\n", concat(msg...)) } + +func (l *logger) Infof(f string, msg ...interface{}) { l.logging(INFO, f+"\n", msg...) } + +func (l *logger) Warn(msg ...interface{}) { l.logging(WARN, "%s\n", concat(msg...)) } + +func (l *logger) Warnln(msg ...interface{}) { l.logging(WARN, "%s\n", concat(msg...)) } + +func (l *logger) Warnf(f string, msg ...interface{}) { l.logging(WARN, f+"\n", msg...) } + +func (l *logger) Error(msg ...interface{}) { l.logging(ERROR, "%s\n", concat(msg...)) } + +func (l *logger) Errorln(msg ...interface{}) { l.logging(ERROR, "%s\n", concat(msg...)) } + +func (l *logger) Errorf(f string, msg ...interface{}) { l.logging(ERROR, f+"\n", msg...) } + +func (l *logger) Fatal(msg ...interface{}) { l.logging(FATAL, "%s\n", concat(msg...)) } + +func (l *logger) Fatalln(msg ...interface{}) { l.logging(FATAL, "%s\n", concat(msg...)) } + +func (l *logger) Fatalf(f string, msg ...interface{}) { l.logging(FATAL, f+"\n", msg...) } + +func (l *logger) Panic(msg ...interface{}) { + record := concat(msg...) + l.logging(PANIC, "%s\n", record) + panic(record) +} + +func (l *logger) Panicln(msg ...interface{}) { + record := concat(msg...) + l.logging(PANIC, "%s\n", record) + panic(record) +} + +func (l *logger) Panicf(format string, msg ...interface{}) { + record := fmt.Sprintf(format, msg...) + l.logging(PANIC, format+"\n", msg...) 
+ panic(record) +} + +func (l *logger) Close() { + select { + case <-l.done: + return + default: + } + l.writerChan <- nil +} + +func NewLogger() *logger { + obj := &logger{ + writers: make(map[Handler]io.WriteCloser, 3), // now only support 3 kinds of handler + writerChan: make(chan *writerArgs, 100), + logFormat: gDefaultLogFormat, + levelThreshold: DEBUG, + handler: NONE, + done: make(chan bool), + } + // The backend writer goroutine to write each log record + go func() { + defer func() { + if e := recover(); e != nil { + fmt.Println(e) + } + }() + for { + select { + case <-obj.done: + return + case args := <-obj.writerChan: // wait until a record comes to log + if args == nil { + close(obj.done) + close(obj.writerChan) + return + } + obj.buildWriter(args.rotateArgs) + for _, w := range obj.writers { + fmt.Fprint(w, args.record) + } + } + } + }() + + return obj +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go b/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go new file mode 100644 index 0000000..23a6b39 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/util/log/util.go @@ -0,0 +1,227 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// util.go - define the package-level default logger and functions for easily use. + +package log + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// The global default logger object to perform logging in this package-level functions +var gDefaultLogger *logger + +func init() { + gDefaultLogger = NewLogger() +} + +// Helper functions +func getFileInfo(path string) (int64, bool) { + info, err := os.Stat(path) + if err != nil { + return -1, false + } else { + return info.Size(), true + } +} + +func getSizeString(size int64) string { + if size < 0 { + return fmt.Sprintf("%d", size) + } + if size < (1 << 10) { + return fmt.Sprintf("%dB", size) + } else if size < (1 << 20) { + return fmt.Sprintf("%dkB", size>>10) + } else if size < (1 << 30) { + return fmt.Sprintf("%dMB", size>>20) + } else if size < (1 << 40) { + return fmt.Sprintf("%dGB", size>>30) + } else { + return "SUPER-LARGE" + } +} + +func getNextFileName(nowFileName string) string { + pos1 := strings.Index(nowFileName, ".") + rest := nowFileName[pos1+1:] + pos2 := strings.Index(rest, ".") + num, _ := strconv.Atoi(rest[0:pos2]) + return fmt.Sprintf("%s.%d.log", nowFileName[0:pos1], num+1) +} + +func concat(msg ...interface{}) string { + buf := make([]string, 0, len(msg)) + for _, m := range msg { + buf = append(buf, fmt.Sprintf("%v", m)) + } + return strings.Join(buf, " ") +} + +// Export package-level functions for logging messages by using the default logger. It supports 3 +// kinds of interface which is similar to the standard package "fmt": with suffix of "ln", "f" or +// nothing. 
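+//
+// For example (an illustrative sketch; the handler and level values shown are
+// just one possible choice, and all functions used are defined in this package):
+//	log.SetLogHandler(log.STDOUT)
+//	log.SetLogLevel(log.INFO)
+//	log.Infof("uploaded part %d of %d", 3, 10) // formatted, like fmt.Printf
+//	log.Infoln("upload", "done")               // joined by spaces, like fmt.Println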
+func Debug(msg ...interface{}) { + gDefaultLogger.logging(DEBUG, "%s\n", concat(msg...)) +} + +func Debugln(msg ...interface{}) { + gDefaultLogger.logging(DEBUG, "%s\n", concat(msg...)) +} + +func Debugf(format string, msg ...interface{}) { + gDefaultLogger.logging(DEBUG, format+"\n", msg...) +} + +func Info(msg ...interface{}) { + gDefaultLogger.logging(INFO, "%s\n", concat(msg...)) +} + +func Infoln(msg ...interface{}) { + gDefaultLogger.logging(INFO, "%s\n", concat(msg...)) +} + +func Infof(format string, msg ...interface{}) { + gDefaultLogger.logging(INFO, format+"\n", msg...) +} + +func Warn(msg ...interface{}) { + gDefaultLogger.logging(WARN, "%s\n", concat(msg...)) +} + +func Warnln(msg ...interface{}) { + gDefaultLogger.logging(WARN, "%s\n", concat(msg...)) +} + +func Warnf(format string, msg ...interface{}) { + gDefaultLogger.logging(WARN, format+"\n", msg...) +} + +func Error(msg ...interface{}) { + gDefaultLogger.logging(ERROR, "%s\n", concat(msg...)) +} + +func Errorln(msg ...interface{}) { + gDefaultLogger.logging(ERROR, "%s\n", concat(msg...)) +} + +func Errorf(format string, msg ...interface{}) { + gDefaultLogger.logging(ERROR, format+"\n", msg...) +} + +func Fatal(msg ...interface{}) { + gDefaultLogger.logging(FATAL, "%s\n", concat(msg...)) +} + +func Fatalln(msg ...interface{}) { + gDefaultLogger.logging(FATAL, "%s\n", concat(msg...)) +} + +func Fatalf(format string, msg ...interface{}) { + gDefaultLogger.logging(FATAL, format+"\n", msg...) +} + +func Panic(msg ...interface{}) { + record := concat(msg...) + gDefaultLogger.logging(PANIC, "%s\n", record) + panic(record) +} + +func Panicln(msg ...interface{}) { + record := concat(msg...) + gDefaultLogger.logging(PANIC, "%s\n", record) + panic(record) +} + +func Panicf(format string, msg ...interface{}) { + record := fmt.Sprintf(format, msg...) + gDefaultLogger.logging(PANIC, format+"\n", msg...) + panic(record) +} + +func Close() { + gDefaultLogger.Close() +} + +// SetLogHandler - set the handler of the logger +// +// PARAMS: +// - Handler: the handler defined in this package, now just support STDOUT, STDERR, FILE +func SetLogHandler(h Handler) { + gDefaultLogger.handler = h +} + +// SetLogLevel - set the level threshold of the logger, only level equal to or bigger than this +// value will be logged. +// +// PARAMS: +// - Level: the level defined in this package, now support 6 levels. +func SetLogLevel(l Level) { + gDefaultLogger.levelThreshold = l +} + +// SetLogFormat - set the log component of each record when logging it. The default log format is +// {FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG}. +// +// PARAMS: +// - format: the format component array. +func SetLogFormat(format []string) { + gDefaultLogger.logFormat = format +} + +// SetLogDir - set the logging directory if logging to file. +// +// PARAMS: +// - dir: the logging directory +// RETURNS: +// - error: check the directory and try to make it, otherwise return the error. +func SetLogDir(dir string) error { + if _, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if mkErr := os.Mkdir(dir, os.ModePerm); mkErr != nil { + return mkErr + } + } else { + return err + } + } + gDefaultLogger.logDir = dir + return nil +} + +// SetRotateType - set the rotating strategy if logging to file. +// +// PARAMS: +// - RotateStrategy: the rotate strategy defined in this package, now support 5 strategy. +func SetRotateType(r RotateStrategy) { + gDefaultLogger.rotateType = r +} + +// SetRotateSize - set the rotating size if logging to file and set the strategy of size. 
+// +// PARAMS: +// - size: the rotating size +// RETURNS: +// - error: check the value and return any error if error occurs. +func SetRotateSize(size int64) error { + if size <= 0 { + return fmt.Errorf("%s", "rotate size should not be negative") + } + gDefaultLogger.rotateSize = size + return nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/string.go b/vendor/github.com/baidubce/bce-sdk-go/util/string.go new file mode 100644 index 0000000..3bb0b10 --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/util/string.go @@ -0,0 +1,88 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// string.go - define the string util function + +// Package util defines the common utilities including string and time. +package util + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "io" +) + +func HmacSha256Hex(key, str_to_sign string) string { + hasher := hmac.New(sha256.New, []byte(key)) + hasher.Write([]byte(str_to_sign)) + return hex.EncodeToString(hasher.Sum(nil)) +} + +func CalculateContentMD5(data io.Reader, size int64) (string, error) { + hasher := md5.New() + n, err := io.CopyN(hasher, data, size) + if err != nil { + return "", fmt.Errorf("calculate content-md5 occurs error: %v", err) + } + if n != size { + return "", fmt.Errorf("calculate content-md5 writing size %d != size %d", n, size) + } + return base64.StdEncoding.EncodeToString(hasher.Sum(nil)), nil +} + +func UriEncode(uri string, encodeSlash bool) string { + var byte_buf bytes.Buffer + for _, b := range []byte(uri) { + if (b >= 'A' && b <= 'Z') || (b >= 'a' && b <= 'z') || (b >= '0' && b <= '9') || + b == '-' || b == '_' || b == '.' || b == '~' || (b == '/' && !encodeSlash) { + byte_buf.WriteByte(b) + } else { + byte_buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + return byte_buf.String() +} + +func NewUUID() string { + var buf [16]byte + for { + if _, err := rand.Read(buf[:]); err == nil { + break + } + } + buf[6] = (buf[6] & 0x0f) | (4 << 4) + buf[8] = (buf[8] & 0xbf) | 0x80 + + res := make([]byte, 36) + hex.Encode(res[0:8], buf[0:4]) + res[8] = '-' + hex.Encode(res[9:13], buf[4:6]) + res[13] = '-' + hex.Encode(res[14:18], buf[6:8]) + res[18] = '-' + hex.Encode(res[19:23], buf[8:10]) + res[23] = '-' + hex.Encode(res[24:], buf[10:]) + return string(res) +} + +func NewRequestId() string { + return NewUUID() +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/util/time.go b/vendor/github.com/baidubce/bce-sdk-go/util/time.go new file mode 100644 index 0000000..e9f9e3d --- /dev/null +++ b/vendor/github.com/baidubce/bce-sdk-go/util/time.go @@ -0,0 +1,46 @@ +/* + * Copyright 2017 Baidu, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ + +// time.go - define the time utility functions for BCE + +package util + +import "time" + +const ( + RFC822Format = "Mon, 02 Jan 2006 15:04:05 MST" + ISO8601Format = "2006-01-02T15:04:05Z" +) + +func NowUTCSeconds() int64 { return time.Now().UTC().Unix() } + +func NowUTCNanoSeconds() int64 { return time.Now().UTC().UnixNano() } + +func FormatRFC822Date(timestamp_second int64) string { + tm := time.Unix(timestamp_second, 0).UTC() + return tm.Format(RFC822Format) +} + +func ParseRFC822Date(time_string string) (time.Time, error) { + return time.Parse(RFC822Format, time_string) +} + +func FormatISO8601Date(timestamp_second int64) string { + tm := time.Unix(timestamp_second, 0).UTC() + return tm.Format(ISO8601Format) +} + +func ParseISO8601Date(time_string string) (time.Time, error) { + return time.Parse(ISO8601Format, time_string) +} diff --git a/vendor/github.com/clbanning/mxj/LICENSE b/vendor/github.com/clbanning/mxj/LICENSE new file mode 100644 index 0000000..f27bccd --- /dev/null +++ b/vendor/github.com/clbanning/mxj/LICENSE @@ -0,0 +1,55 @@ +Copyright (c) 2012-2016 Charles Banning . All rights reserved. + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +=============================================================================== + +Go Language Copyright & License - + +Copyright 2009 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/clbanning/mxj/anyxml.go b/vendor/github.com/clbanning/mxj/anyxml.go
new file mode 100644
index 0000000..ec2f3df
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/anyxml.go
@@ -0,0 +1,189 @@
+package mxj
+
+import (
+ "encoding/xml"
+ "reflect"
+)
+
+const (
+ DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+ var rt, et string
+ if len(tags) == 1 || len(tags) == 2 {
+ rt = tags[0]
+ } else {
+ rt = DefaultRootTag
+ }
+ if len(tags) == 2 {
+ et = tags[1]
+ } else {
+ et = DefaultElementTag
+ }
+
+ if v == nil {
+ if useGoXmlEmptyElemSyntax {
+ return []byte("<" + rt + "></" + rt + ">"), nil
+ }
+ return []byte("<" + rt + "/>"), nil
+ }
+ if reflect.TypeOf(v).Kind() == reflect.Struct {
+ return xml.Marshal(v)
+ }
+
+ var err error
+ s := new(string)
+ p := new(pretty)
+
+ var ss string
+ var b []byte
+ switch v.(type) {
+ case []interface{}:
+ ss = "<" + rt + ">"
+ for _, vv := range v.([]interface{}) {
+ switch vv.(type) {
+ case map[string]interface{}:
+ m := vv.(map[string]interface{})
+ if len(m) == 1 {
+ for tag, val := range m {
+ err = mapToXmlIndent(false, s, tag, val, p)
+ }
+ } else {
+ err = mapToXmlIndent(false, s, et, vv, p)
+ }
+ default:
+ err = mapToXmlIndent(false, s, et, vv, p)
+ }
+ if err != nil {
+ break
+ }
+ }
+ ss += *s + "</" + rt + ">"
+ b = []byte(ss)
+ case map[string]interface{}:
+ m := Map(v.(map[string]interface{}))
+ b, err = m.Xml(rt)
+ default:
+ err = mapToXmlIndent(false, s, rt, v, p)
+ b = []byte(*s)
+ }
+
+ return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
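+// For instance (a hypothetical input value; the output shape follows the
+// AnyXmlIndent example above):
+//	b, _ := AnyXmlIndent(map[string]interface{}{"song": "death letter"}, "", "  ", "doc")
+//	// string(b) yields:
+//	// <doc>
+//	//   <song>death letter</song>
+//	// </doc>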
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+ var rt, et string
+ if len(tags) == 1 || len(tags) == 2 {
+ rt = tags[0]
+ } else {
+ rt = DefaultRootTag
+ }
+ if len(tags) == 2 {
+ et = tags[1]
+ } else {
+ et = DefaultElementTag
+ }
+
+ if v == nil {
+ if useGoXmlEmptyElemSyntax {
+ return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+ }
+ return []byte(prefix + "<" + rt + "/>"), nil
+ }
+ if reflect.TypeOf(v).Kind() == reflect.Struct {
+ return xml.MarshalIndent(v, prefix, indent)
+ }
+
+ var err error
+ s := new(string)
+ p := new(pretty)
+ p.indent = indent
+ p.padding = prefix
+
+ var ss string
+ var b []byte
+ switch v.(type) {
+ case []interface{}:
+ ss = "<" + rt + ">\n"
+ p.Indent()
+ for _, vv := range v.([]interface{}) {
+ switch vv.(type) {
+ case map[string]interface{}:
+ m := vv.(map[string]interface{})
+ if len(m) == 1 {
+ for tag, val := range m {
+ err = mapToXmlIndent(true, s, tag, val, p)
+ }
+ } else {
+ p.start = 1 // we 1 tag in
+ err = mapToXmlIndent(true, s, et, vv, p)
+ *s += "\n"
+ }
+ default:
+ p.start = 0 // in case trailing p.start = 1
+ err = mapToXmlIndent(true, s, et, vv, p)
+ }
+ if err != nil {
+ break
+ }
+ }
+ ss += *s + "</" + rt + ">"
+ b = []byte(ss)
+ case map[string]interface{}:
+ m := Map(v.(map[string]interface{}))
+ b, err = m.XmlIndent(prefix, indent, rt)
+ default:
+ err = mapToXmlIndent(true, s, rt, v, p)
+ b = []byte(*s)
+ }
+
+ return b, err
+}
diff --git a/vendor/github.com/clbanning/mxj/atomFeedString.xml b/vendor/github.com/clbanning/mxj/atomFeedString.xml
new file mode 100644
index 0000000..474575a
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/atomFeedString.xml
@@ -0,0 +1,54 @@
+
+Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub
+2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
+    feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+    with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&#39;s actual URL in
+the link rel=&quot;self&quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+rietveld: correct tab handling
+2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&#39;t know where to put the tab stops. 
Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + diff --git a/vendor/github.com/clbanning/mxj/doc.go b/vendor/github.com/clbanning/mxj/doc.go new file mode 100644 index 0000000..8ed79a5 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/doc.go @@ -0,0 +1,134 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2015, 2018 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +/* +Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards. + +mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package. The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them. + +Note: this library was designed for processing ad hoc anonymous messages. Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly. + +Related Packages: + checkxml: github.com/clbanning/checkxml provides functions for validating XML data. + +Notes: + 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. + 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. + 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. + 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. + 2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data. + 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. + 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). + 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. + 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). + 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. + 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). + 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". + To cast them to float64, first set flag with CastNanInf(true). + 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. + 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. + 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). + 2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding. + 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. 
+ +SUMMARY + + type Map map[string]interface{} + + Create a Map value, 'mv', from any map[string]interface{} value, 'v': + mv := Map(v) + + Unmarshal / marshal XML as a Map value, 'mv': + mv, err := NewMapXml(xmlValue) // unmarshal + xmlValue, err := mv.Xml() // marshal + + Unmarshal XML from an io.Reader as a Map value, 'mv': + mv, err := NewMapXmlReader(xmlReader) // repeated calls, as with an os.File Reader, will process stream + mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded + + Marshal Map value, 'mv', to an XML Writer (io.Writer): + err := mv.XmlWriter(xmlWriter) + raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter + + Also, for prettified output: + xmlValue, err := mv.XmlIndent(prefix, indent, ...) + err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...) + raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...) + + Bulk process XML with error handling (note: handlers must return a boolean value): + err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error)) + err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte)) + + Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader. + + There are comparable functions and methods for JSON processing. + + Arbitrary structure values can be decoded to / encoded from Map values: + mv, err := NewMapStruct(structVal) + err := mv.Struct(structPointer) + + To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON + or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then: + paths := mv.PathsForKey(key) + path := mv.PathForKeyShortest(key) + values, err := mv.ValuesForKey(key, subkeys) + values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays. + count, err := mv.UpdateValuesForPath(newVal, path, subkeys) + + Get everything at once, irrespective of path depth: + leafnodes := mv.LeafNodes() + leafvalues := mv.LeafValues() + + A new Map with whatever keys are desired can be created from the current Map and then encoded in XML + or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.) + newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N") + newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go" + newXml, err := newMap.Xml() // for example + newJson, err := newMap.Json() // ditto + +XML PARSING CONVENTIONS + + Using NewMapXml() + + - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`, + to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or + `SetAttrPrefix()`.) + - If the element is a simple element and has attributes, the element value + is given the key `#text` for its `map[string]interface{}` representation. (See + the 'atomFeedString.xml' test data, below.) + - XML comments, directives, and process instructions are ignored. + - If CoerceKeysToLower() has been called, then the resultant keys will be lower case. + + Using NewMapXmlSeq() + + - Attributes are parsed to `map["#attr"]map[]map[string]interface{}`values + where the `` value has "#text" and "#seq" keys - the "#text" key holds the + value for ``. + - All elements, except for the root, have a "#seq" key. 
+
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved:
+      - <ns:key>something</ns:key> parses to map["ns:key"]interface{}{"something"}
+      - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"}
+
+   Both
+
+   - By default, "NaN", "Inf", and "-Inf" values are not cast to float64. If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+     which, then, encode in JSON as '"tag":""' values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+     randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+     Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+     mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+     working with the Map representation.
+
+*/
+package mxj
diff --git a/vendor/github.com/clbanning/mxj/escapechars.go b/vendor/github.com/clbanning/mxj/escapechars.go
new file mode 100644
index 0000000..bee0442
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/escapechars.go
@@ -0,0 +1,54 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+)
+
+var xmlEscapeChars bool
+
+// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values.
+// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is
+// then '&amp;' will be re-escaped as '&amp;amp;'.
+//
+/*
+	The values are:
+	"	&quot;
+	'	&apos;
+	<	&lt;
+	>	&gt;
+	&	&amp;
+*/
+func XMLEscapeChars(b bool) {
+	xmlEscapeChars = b
+}
+
+// Scan for '&' first, since 's' may contain "&amp;" that is parsed to "&amp;amp;"
+// - or "&lt;" that is parsed to "&amp;lt;".
+var escapechars = [][2][]byte{
+	{[]byte(`&`), []byte(`&amp;`)},
+	{[]byte(`<`), []byte(`&lt;`)},
+	{[]byte(`>`), []byte(`&gt;`)},
+	{[]byte(`"`), []byte(`&quot;`)},
+	{[]byte(`'`), []byte(`&apos;`)},
+}
+
+func escapeChars(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	b := []byte(s)
+	for _, v := range escapechars {
+		n := bytes.Count(b, v[0])
+		if n == 0 {
+			continue
+		}
+		b = bytes.Replace(b, v[0], v[1], n)
+	}
+	return string(b)
+}
+
diff --git a/vendor/github.com/clbanning/mxj/exists.go b/vendor/github.com/clbanning/mxj/exists.go
new file mode 100644
index 0000000..2fb3084
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/exists.go
@@ -0,0 +1,7 @@
+package mxj
+
+// Exists - checks whether the path exists in the Map.
+func (mv Map) Exists(path string, subkeys ...string) bool {
+	v, err := mv.ValuesForPath(path, subkeys...)
+	return err == nil && len(v) > 0
+}
diff --git a/vendor/github.com/clbanning/mxj/files.go b/vendor/github.com/clbanning/mxj/files.go
new file mode 100644
index 0000000..27e06e1
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/files.go
@@ -0,0 +1,287 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+func NewMaps() Maps {
+	return make(Maps, 0)
+}
+
+type MapRaw struct {
+	M Map
+	R []byte
+}
+
+// NewMapsFromJsonFile - creates an array of Map values from a file of JSON values.
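+// Each JSON value in the file becomes one Map in the returned slice. A minimal
+// usage sketch, assuming a hypothetical file "msgs.json" holding two JSON objects:
+//
+//	// msgs.json:
+//	//   { "msg":"hello" }
+//	//   { "msg":"world" }
+//	mvs, err := mxj.NewMapsFromJsonFile("msgs.json")
+//	if err != nil {
+//		// handle error
+//	}
+//	// len(mvs) == 2; mvs[0]["msg"] == "hello"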
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFile - creates an array of Map values from a file of XML values.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
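+//
+// A minimal dump sketch using the methods below (the Maps value 'mvs' and the
+// file name are hypothetical):
+//
+//	if err := mvs.JsonFileIndent("config-dump.json", "", "  "); err != nil {
+//		// handle error
+//	}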
+ +// JsonString - analogous to mv.Json() +func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) { + var s string + for _, v := range mvs { + j, err := v.Json() + if err != nil { + return s, err + } + s += string(j) + } + return s, nil +} + +// JsonStringIndent - analogous to mv.JsonIndent() +func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) { + var s string + var haveFirst bool + for _, v := range mvs { + j, err := v.JsonIndent(prefix, indent) + if err != nil { + return s, err + } + if haveFirst { + s += "\n" + } else { + haveFirst = true + } + s += string(j) + } + return s, nil +} + +// XmlString - analogous to mv.Xml() +func (mvs Maps) XmlString() (string, error) { + var s string + for _, v := range mvs { + x, err := v.Xml() + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// XmlStringIndent - analogous to mv.XmlIndent() +func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) { + var s string + for _, v := range mvs { + x, err := v.XmlIndent(prefix, indent) + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// JsonFile - write Maps to named file as JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonWriter method. +func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonString(encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// JsonFileIndent - write Maps to named file as pretty JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonIndentWriter method. +func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonStringIndent(prefix, indent, encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFile - write Maps to named file as XML +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlWriter method. +func (mvs Maps) XmlFile(file string) error { + s, err := mvs.XmlString() + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFileIndent - write Maps to named file as pretty XML +// Note: the file will be created,if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlIndentWriter method. 
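+//
+// As with the other writers above, the docs are written back-to-back; a minimal
+// sketch (file name is hypothetical):
+//
+//	if err := mvs.XmlFileIndent("config-dump.xml", "", "  "); err != nil {
+//		// handle error
+//	}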
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error { + s, err := mvs.XmlStringIndent(prefix, indent) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} diff --git a/vendor/github.com/clbanning/mxj/files_test.badjson b/vendor/github.com/clbanning/mxj/files_test.badjson new file mode 100644 index 0000000..d187200 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.badjson @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"some", "bad":JSON, "in":"it" } diff --git a/vendor/github.com/clbanning/mxj/files_test.badxml b/vendor/github.com/clbanning/mxj/files_test.badxml new file mode 100644 index 0000000..4736ef9 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.badxml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/files_test.json b/vendor/github.com/clbanning/mxj/files_test.json new file mode 100644 index 0000000..e9a3ddf --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.json @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"just", "two":2, "JSON":"values", "true":true } diff --git a/vendor/github.com/clbanning/mxj/files_test.xml b/vendor/github.com/clbanning/mxj/files_test.xml new file mode 100644 index 0000000..65cf021 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test.xml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.json b/vendor/github.com/clbanning/mxj/files_test_dup.json new file mode 100644 index 0000000..2becb6a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_dup.json @@ -0,0 +1 @@ +{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.xml b/vendor/github.com/clbanning/mxj/files_test_dup.xml new file mode 100644 index 0000000..f68d22e --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_dup.xml @@ -0,0 +1 @@ +for files.gotestdoctest casesome \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.json b/vendor/github.com/clbanning/mxj/files_test_indent.json new file mode 100644 index 0000000..6fde156 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_indent.json @@ -0,0 +1,12 @@ +{ + "a": "test", + "file": "for", + "files_test.go": "case", + "this": "is" +} +{ + "JSON": "values", + "true": true, + "two": 2, + "with": "just" +} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.xml b/vendor/github.com/clbanning/mxj/files_test_indent.xml new file mode 100644 index 0000000..8c91a1d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/files_test_indent.xml @@ -0,0 +1,8 @@ + + for files.go + test + + doc + test case + some + \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/gob.go b/vendor/github.com/clbanning/mxj/gob.go new file mode 100644 index 0000000..d56c2fd --- /dev/null +++ b/vendor/github.com/clbanning/mxj/gob.go @@ -0,0 +1,35 @@ +// gob.go - Encode/Decode a Map into a gob object. + +package mxj + +import ( + "bytes" + "encoding/gob" +) + +// NewMapGob returns a Map value for a gob object that has been +// encoded from a map[string]interface{} (or compatible type) value. 
+// It is intended to provide symmetric handling of Maps that have +// been encoded using mv.Gob. +func NewMapGob(gobj []byte) (Map, error) { + m := make(map[string]interface{}, 0) + if len(gobj) == 0 { + return m, nil + } + r := bytes.NewReader(gobj) + dec := gob.NewDecoder(r) + if err := dec.Decode(&m); err != nil { + return m, err + } + return m, nil +} + +// Gob returns a gob-encoded value for the Map 'mv'. +func (mv Map) Gob() ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(map[string]interface{}(mv)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/clbanning/mxj/json.go b/vendor/github.com/clbanning/mxj/json.go new file mode 100644 index 0000000..eb2c05a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/json.go @@ -0,0 +1,323 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "time" +) + +// ------------------------------ write JSON ----------------------- + +// Just a wrapper on json.Marshal. +// If option safeEncoding is'true' then safe encoding of '<', '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) Json(safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.Marshal(mv) + + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// Just a wrapper on json.MarshalIndent. +// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.MarshalIndent(mv, prefix, indent) + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// The following implementation is provided for symmetry with NewMapJsonReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error { + b, err := mv.Json(safeEncoding...) + if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) { + b, err := mv.Json(safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// Writes the Map as pretty JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error { + b, err := mv.JsonIndent(prefix, indent, safeEncoding...) 
+ if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) { + b, err := mv.JsonIndent(prefix, indent, safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// --------------------------- read JSON ----------------------------- + +// Decode numericvalues as json.Number type Map values - see encoding/json#Number. +// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), +// etc.; it does not affect NewMapXml(), etc. The XML encoders mv.Xml() and mv.XmlIndent() +// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number +// value types and the resulting Map can be correctly encoded into a XML object. +var JsonUseNumber bool + +// Just a wrapper on json.Unmarshal +// Converting JSON to XML is a simple as: +// ... +// mapVal, merr := mxj.NewMapJson(jsonVal) +// if merr != nil { +// // handle error +// } +// xmlVal, xerr := mapVal.Xml() +// if xerr != nil { +// // handle error +// } +// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}], +// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map. +// See mxj/j2x/j2x_test.go. +func NewMapJson(jsonVal []byte) (Map, error) { + // empty or nil begets empty + if len(jsonVal) == 0 { + m := make(map[string]interface{}, 0) + return m, nil + } + // handle a goofy case ... + if jsonVal[0] == '[' { + jsonVal = []byte(`{"object":` + string(jsonVal) + `}`) + } + m := make(map[string]interface{}) + // err := json.Unmarshal(jsonVal, &m) + buf := bytes.NewReader(jsonVal) + dec := json.NewDecoder(buf) + if JsonUseNumber { + dec.UseNumber() + } + err := dec.Decode(&m) + return m, err +} + +// Retrieve a Map value from an io.Reader. +// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an +// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte +// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal +// a JSON object. +func NewMapJsonReader(jsonReader io.Reader) (Map, error) { + jb, err := getJson(jsonReader) + if err != nil || len(*jb) == 0 { + return nil, err + } + + // Unmarshal the 'presumed' JSON string + return NewMapJson(*jb) +} + +// Retrieve a Map value and raw JSON - []byte - from an io.Reader. +// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an +// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte +// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal +// a JSON object and retrieve the raw JSON in a single call. +func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) { + jb, err := getJson(jsonReader) + if err != nil || len(*jb) == 0 { + return nil, *jb, err + } + + // Unmarshal the 'presumed' JSON string + m, merr := NewMapJson(*jb) + return m, *jb, merr +} + +// Pull the next JSON string off the stream: just read from first '{' to its closing '}'. +// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package. 
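+//
+// That scan is what lets repeated reader calls walk a stream of JSON values;
+// a minimal sketch (the strings.Reader input is illustrative):
+//
+//	rdr := strings.NewReader(`{"a":1} {"b":2}`)
+//	m1, _ := mxj.NewMapJsonReader(rdr) // m1 holds {"a":1}
+//	m2, _ := mxj.NewMapJsonReader(rdr) // m2 holds {"b":2}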
+func getJson(rdr io.Reader) (*[]byte, error) { + bval := make([]byte, 1) + jb := make([]byte, 0) + var inQuote, inJson bool + var parenCnt int + var previous byte + + // scan the input for a matched set of {...} + // json.Unmarshal will handle syntax checking. + for { + _, err := rdr.Read(bval) + if err != nil { + if err == io.EOF && inJson && parenCnt > 0 { + return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb)) + } + return &jb, err + } + switch bval[0] { + case '{': + if !inQuote { + parenCnt++ + inJson = true + } + case '}': + if !inQuote { + parenCnt-- + } + if parenCnt < 0 { + return nil, fmt.Errorf("closing } without opening {: %s", string(jb)) + } + case '"': + if inQuote { + if previous == '\\' { + break + } + inQuote = false + } else { + inQuote = true + } + case '\n', '\r', '\t', ' ': + if !inQuote { + continue + } + } + if inJson { + jb = append(jb, bval[0]) + if parenCnt == 0 { + break + } + } + previous = bval[0] + } + + return &jb, nil +} + +// ------------------------------- JSON Reader handler via Map values ----------------------- + +// Default poll delay to keep Handler from spinning on an open stream +// like sitting on os.Stdin waiting for imput. +var jhandlerPollInterval = time.Duration(1e6) + +// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader(). +// This avoids treating one or other as a special case and discussing the underlying stdlib logic. + +// Bulk process JSON using handlers that process a Map value. +// 'rdr' is an io.Reader for the JSON (stream). +// 'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapJsonReader(jsonReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + <-time.After(jhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process JSON using handlers that process a Map value and the raw JSON. +// 'rdr' is an io.Reader for the JSON (stream). +// 'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. 
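+//
+// A minimal wiring sketch, given an io.Reader 'rdr' (handler bodies are illustrative):
+//
+//	err := mxj.HandleJsonReaderRaw(rdr,
+//		func(m mxj.Map, raw []byte) bool {
+//			fmt.Printf("%v\n", m)
+//			return true // keep reading
+//		},
+//		func(e error, raw []byte) bool {
+//			fmt.Printf("%s on: %s\n", e, string(raw))
+//			return true // skip the bad value and keep reading
+//		})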
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapJsonReaderRaw(jsonReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + <-time.After(jhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} diff --git a/vendor/github.com/clbanning/mxj/keyvalues.go b/vendor/github.com/clbanning/mxj/keyvalues.go new file mode 100644 index 0000000..0b244c8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/keyvalues.go @@ -0,0 +1,671 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters. + +package mxj + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +// ----------------------------- get everything FOR a single key ------------------------- + +const ( + minArraySize = 32 +) + +var defaultArraySize int = minArraySize + +// Adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath(). +// This can have the effect of significantly reducing memory allocation-copy functions for large data sets. +// Returns the initial buffer size. +func SetArraySize(size int) int { + if size > minArraySize { + defaultArraySize = size + } else { + defaultArraySize = minArraySize + } + return defaultArraySize +} + +// Return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match. +// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*". +// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. +// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. +// - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3". +// - If the 'key' refers to a list, then "key:value" could select a list member of the list. +// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. +// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an +// exclusion critera - e.g., "!author:William T. Gaddis". +// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". +func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) { + m := map[string]interface{}(mv) + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + ret := make([]interface{}, 0, defaultArraySize) + var cnt int + hasKey(m, key, &ret, &cnt, subKeyMap) + return ret[:cnt], nil +} + +var KeyNotExistError = errors.New("Key does not exist") + +// ValueForKey is a wrapper on ValuesForKey. It returns the first member of []interface{}, if any. +// If there is no value, "nil, nil" is returned. +func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) { + vals, err := mv.ValuesForKey(key, subkeys...) 
+ if err != nil { + return nil, err + } + if len(vals) == 0 { + return nil, KeyNotExistError + } + return vals[0], nil +} + +// hasKey - if the map 'key' exists append it to array +// if it doesn't do nothing except scan array and map values +func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) { + // func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) { + switch iv.(type) { + case map[string]interface{}: + vv := iv.(map[string]interface{}) + // see if the current value is of interest + if v, ok := vv[key]; ok { + switch v.(type) { + case map[string]interface{}: + if hasSubKeys(v, subkeys) { + *ret = append(*ret, v) + *cnt++ + } + case []interface{}: + for _, av := range v.([]interface{}) { + if hasSubKeys(av, subkeys) { + *ret = append(*ret, av) + *cnt++ + } + } + default: + if len(subkeys) == 0 { + *ret = append(*ret, v) + *cnt++ + } + } + } + + // wildcard case + if key == "*" { + for _, v := range vv { + switch v.(type) { + case map[string]interface{}: + if hasSubKeys(v, subkeys) { + *ret = append(*ret, v) + *cnt++ + } + case []interface{}: + for _, av := range v.([]interface{}) { + if hasSubKeys(av, subkeys) { + *ret = append(*ret, av) + *cnt++ + } + } + default: + if len(subkeys) == 0 { + *ret = append(*ret, v) + *cnt++ + } + } + } + } + + // scan the rest + for _, v := range vv { + hasKey(v, key, ret, cnt, subkeys) + } + case []interface{}: + for _, v := range iv.([]interface{}) { + hasKey(v, key, ret, cnt, subkeys) + } + } +} + +// ----------------------- get everything for a node in the Map --------------------------- + +// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.) +// 2014.04.28 - implementation note. +// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion +// of wildcards and unindexed arrays. Embedding such logic into valuesForKeyPath() would have made the +// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead. + +// Retrieve all values for a path from the Map. If len(returned_values) == 0, then no match. +// On error, the returned array is 'nil'. +// 'path' is a dot-separated path of key values. +// - If a node in the path is '*', then everything beyond is walked. +// - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" - +// even "*[2].*[0].field". +// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. +// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. +// - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3". +// - If the 'path' refers to a list, then "tag:value" would return member of the list. +// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. +// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an +// exclusion critera - e.g., "!author:William T. Gaddis". +// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". +func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) { + // If there are no array indexes in path, use legacy ValuesForPath() logic. + if strings.Index(path, "[") < 0 { + return mv.oldValuesForPath(path, subkeys...) 
+ } + + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + keys, kerr := parsePath(path) + if kerr != nil { + return nil, kerr + } + + vals, verr := valuesForArray(keys, mv) + if verr != nil { + return nil, verr // Vals may be nil, but return empty array. + } + + // Need to handle subkeys ... only return members of vals that satisfy conditions. + retvals := make([]interface{}, 0) + for _, v := range vals { + if hasSubKeys(v, subKeyMap) { + retvals = append(retvals, v) + } + } + return retvals, nil +} + +func valuesForArray(keys []*key, m Map) ([]interface{}, error) { + var tmppath string + var haveFirst bool + var vals []interface{} + var verr error + + lastkey := len(keys) - 1 + for i := 0; i <= lastkey; i++ { + if !haveFirst { + tmppath = keys[i].name + haveFirst = true + } else { + tmppath += "." + keys[i].name + } + + // Look-ahead: explode wildcards and unindexed arrays. + // Need to handle un-indexed list recursively: + // e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]". + // Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ... + if !keys[i].isArray && i < lastkey && keys[i+1].isArray { + // Can't pass subkeys because we may not be at literal end of path. + vv, vverr := m.oldValuesForPath(tmppath) + if vverr != nil { + return nil, vverr + } + for _, v := range vv { + // See if we can walk the value. + am, ok := v.(map[string]interface{}) + if !ok { + continue + } + // Work the backend. + nvals, nvalserr := valuesForArray(keys[i+1:], Map(am)) + if nvalserr != nil { + return nil, nvalserr + } + vals = append(vals, nvals...) + } + break // have recursed the whole path - return + } + + if keys[i].isArray || i == lastkey { + // Don't pass subkeys because may not be at literal end of path. + vals, verr = m.oldValuesForPath(tmppath) + } else { + continue + } + if verr != nil { + return nil, verr + } + + if i == lastkey && !keys[i].isArray { + break + } + + // Now we're looking at an array - supposedly. + // Is index in range of vals? + if len(vals) <= keys[i].position { + vals = nil + break + } + + // Return the array member of interest, if at end of path. + if i == lastkey { + vals = vals[keys[i].position:(keys[i].position + 1)] + break + } + + // Extract the array member of interest. 
+ am := vals[keys[i].position:(keys[i].position + 1)] + + // must be a map[string]interface{} value so we can keep walking the path + amm, ok := am[0].(map[string]interface{}) + if !ok { + vals = nil + break + } + + m = Map(amm) + haveFirst = false + } + + return vals, nil +} + +type key struct { + name string + isArray bool + position int +} + +func parsePath(s string) ([]*key, error) { + keys := strings.Split(s, ".") + + ret := make([]*key, 0) + + for i := 0; i < len(keys); i++ { + if keys[i] == "" { + continue + } + + newkey := new(key) + if strings.Index(keys[i], "[") < 0 { + newkey.name = keys[i] + ret = append(ret, newkey) + continue + } + + p := strings.Split(keys[i], "[") + newkey.name = p[0] + p = strings.Split(p[1], "]") + if p[0] == "" { // no right bracket + return nil, fmt.Errorf("no right bracket on key index: %s", keys[i]) + } + // convert p[0] to a int value + pos, nerr := strconv.ParseInt(p[0], 10, 32) + if nerr != nil { + return nil, fmt.Errorf("cannot convert index to int value: %s", p[0]) + } + newkey.position = int(pos) + newkey.isArray = true + ret = append(ret, newkey) + } + + return ret, nil +} + +// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'. +func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) { + m := map[string]interface{}(mv) + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + keys := strings.Split(path, ".") + if keys[len(keys)-1] == "" { + keys = keys[:len(keys)-1] + } + ivals := make([]interface{}, 0, defaultArraySize) + var cnt int + valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap) + return ivals[:cnt], nil +} + +func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) { + lenKeys := len(keys) + + // load 'm' values into 'ret' + // expand any lists + if lenKeys == 0 { + switch m.(type) { + case map[string]interface{}: + if subkeys != nil { + if ok := hasSubKeys(m, subkeys); !ok { + return + } + } + *ret = append(*ret, m) + *cnt++ + case []interface{}: + for i, v := range m.([]interface{}) { + if subkeys != nil { + if ok := hasSubKeys(v, subkeys); !ok { + continue // only load list members with subkeys + } + } + *ret = append(*ret, (m.([]interface{}))[i]) + *cnt++ + } + default: + if subkeys != nil { + return // must be map[string]interface{} if there are subkeys + } + *ret = append(*ret, m) + *cnt++ + } + return + } + + // key of interest + key := keys[0] + switch key { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + default: + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + 
case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + } + } + } + } +} + +// hasSubKeys() - interface{} equality works for string, float64, bool +// 'v' must be a map[string]interface{} value to have subkeys +// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard. +func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool { + if len(subkeys) == 0 { + return true + } + + switch v.(type) { + case map[string]interface{}: + // do all subKey name:value pairs match? + mv := v.(map[string]interface{}) + for skey, sval := range subkeys { + isNotKey := false + if skey[:1] == "!" { // a NOT-key + skey = skey[1:] + isNotKey = true + } + vv, ok := mv[skey] + if !ok { // key doesn't exist + if isNotKey { // key not there, but that's what we want + if kv, ok := sval.(string); ok && kv == "*" { + continue + } + } + return false + } + // wildcard check + if kv, ok := sval.(string); ok && kv == "*" { + if isNotKey { // key is there, and we don't want it + return false + } + continue + } + switch sval.(type) { + case string: + if s, ok := vv.(string); ok && s == sval.(string) { + if isNotKey { + return false + } + continue + } + case bool: + if b, ok := vv.(bool); ok && b == sval.(bool) { + if isNotKey { + return false + } + continue + } + case float64: + if f, ok := vv.(float64); ok && f == sval.(float64) { + if isNotKey { + return false + } + continue + } + } + // key there but didn't match subkey value + if isNotKey { // that's what we want + continue + } + return false + } + // all subkeys matched + return true + } + + // not a map[string]interface{} value, can't have subkeys + return false +} + +// Generate map of key:value entries as map[string]string. +// 'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'. +// If len(kv) == 0, the return is (nil, nil). +func getSubKeyMap(kv ...string) (map[string]interface{}, error) { + if len(kv) == 0 { + return nil, nil + } + m := make(map[string]interface{}, 0) + for _, v := range kv { + vv := strings.Split(v, fieldSep) + switch len(vv) { + case 2: + m[vv[0]] = interface{}(vv[1]) + case 3: + switch vv[2] { + case "string", "char", "text": + m[vv[0]] = interface{}(vv[1]) + case "bool", "boolean": + // ParseBool treats "1"==true & "0"==false + b, err := strconv.ParseBool(vv[1]) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1]) + } + m[vv[0]] = interface{}(b) + case "float", "float64", "num", "number", "numeric": + f, err := strconv.ParseFloat(vv[1], 64) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1]) + } + m[vv[0]] = interface{}(f) + default: + return nil, fmt.Errorf("unknown subkey conversion spec: %s", v) + } + default: + return nil, fmt.Errorf("unknown subkey spec: %s", v) + } + } + return m, nil +} + +// ------------------------------- END of valuesFor ... ---------------------------- + +// ----------------------- locate where a key value is in the tree ------------------- + +//----------------------------- find all paths to a key -------------------------------- + +// Get all paths through Map, 'mv', (in dot-notation) that terminate with the specified key. +// Results can be used with ValuesForPath. 
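+//
+// A minimal sketch (the document shape is illustrative):
+//
+//	mv, _ := mxj.NewMapJson([]byte(`{"a":{"b":1,"c":{"b":2}}}`))
+//	paths := mv.PathsForKey("b")        // "a.b" and "a.c.b", in map-walk order
+//	short := mv.PathForKeyShortest("b") // "a.b"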
+func (mv Map) PathsForKey(key string) []string { + m := map[string]interface{}(mv) + breadbasket := make(map[string]bool, 0) + breadcrumbs := "" + + hasKeyPath(breadcrumbs, m, key, breadbasket) + if len(breadbasket) == 0 { + return nil + } + + // unpack map keys to return + res := make([]string, len(breadbasket)) + var i int + for k := range breadbasket { + res[i] = k + i++ + } + + return res +} + +// Extract the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.. +// Paths are strings using dot-notation. +func (mv Map) PathForKeyShortest(key string) string { + paths := mv.PathsForKey(key) + + lp := len(paths) + if lp == 0 { + return "" + } + if lp == 1 { + return paths[0] + } + + shortest := paths[0] + shortestLen := len(strings.Split(shortest, ".")) + + for i := 1; i < len(paths); i++ { + vlen := len(strings.Split(paths[i], ".")) + if vlen < shortestLen { + shortest = paths[i] + shortestLen = vlen + } + } + + return shortest +} + +// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth +// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'. +func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) { + switch iv.(type) { + case map[string]interface{}: + vv := iv.(map[string]interface{}) + if _, ok := vv[key]; ok { + // create a new breadcrumb, intialized with the one we have + var nbc string + if crumbs == "" { + nbc = key + } else { + nbc = crumbs + "." + key + } + basket[nbc] = true + } + // walk on down the path, key could occur again at deeper node + for k, v := range vv { + // create a new breadcrumb, intialized with the one we have + var nbc string + if crumbs == "" { + nbc = k + } else { + nbc = crumbs + "." + k + } + hasKeyPath(nbc, v, key, basket) + } + case []interface{}: + // crumb-trail doesn't change, pass it on + for _, v := range iv.([]interface{}) { + hasKeyPath(crumbs, v, key, basket) + } + } +} + +var PathNotExistError = errors.New("Path does not exist") + +// ValueForPath wrap ValuesFor Path and returns the first value returned. +// If no value is found it returns 'nil' and PathNotExistError. +func (mv Map) ValueForPath(path string) (interface{}, error) { + vals, err := mv.ValuesForPath(path) + if err != nil { + return nil, err + } + if len(vals) == 0 { + return nil, PathNotExistError + } + return vals[0], nil +} + +// Returns the first found value for the path as a string. +func (mv Map) ValueForPathString(path string) (string, error) { + vals, err := mv.ValuesForPath(path) + if err != nil { + return "", err + } + if len(vals) == 0 { + return "", errors.New("ValueForPath: path not found") + } + val := vals[0] + switch str := val.(type) { + case string: + return str, nil + default: + return "", fmt.Errorf("ValueForPath: unsupported type: %T", str) + } +} + +// Returns the first found value for the path as a string. +// If the path is not found then it returns an empty string. 
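+//
+// A minimal sketch (the path is illustrative):
+//
+//	title := mv.ValueOrEmptyForPathString("doc.title") // "" if the path is missing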
+func (mv Map) ValueOrEmptyForPathString(path string) string { + str, _ := mv.ValueForPathString(path) + return str +} diff --git a/vendor/github.com/clbanning/mxj/leafnode.go b/vendor/github.com/clbanning/mxj/leafnode.go new file mode 100644 index 0000000..cf413eb --- /dev/null +++ b/vendor/github.com/clbanning/mxj/leafnode.go @@ -0,0 +1,112 @@ +package mxj + +// leafnode.go - return leaf nodes with paths and values for the Map +// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw + +import ( + "strconv" + "strings" +) + +const ( + NoAttributes = true // suppress LeafNode values that are attributes +) + +// LeafNode - a terminal path value in a Map. +// For XML Map values it represents an attribute or simple element value - of type +// string unless Map was created using Cast flag. For JSON Map values it represents +// a string, numeric, boolean, or null value. +type LeafNode struct { + Path string // a dot-notation representation of the path with array subscripting + Value interface{} // the value at the path termination +} + +// LeafNodes - returns an array of all LeafNode values for the Map. +// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-') +// as well as the "#text" key for the associated simple element value. +// +// PrependAttrWithHypen(false) will result in attributes having .attr-name as +// terminal node in 'path' while the path for the element value, itself, will be +// the base path w/o "#text". +// +// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax +// rather than "[N]" syntax. +func (mv Map) LeafNodes(no_attr ...bool) []LeafNode { + var a bool + if len(no_attr) == 1 { + a = no_attr[0] + } + + l := make([]LeafNode, 0) + getLeafNodes("", "", map[string]interface{}(mv), &l, a) + return l +} + +func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) { + // if stripping attributes, then also strip "#text" key + if !noattr || node != "#text" { + if path != "" && node[:1] != "[" { + path += "." + } + path += node + } + switch mv.(type) { + case map[string]interface{}: + for k, v := range mv.(map[string]interface{}) { + // if noattr && k[:1] == "-" { + if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue + } + getLeafNodes(path, k, v, l, noattr) + } + case []interface{}: + for i, v := range mv.([]interface{}) { + if useDotNotation { + getLeafNodes(path, strconv.Itoa(i), v, l, noattr) + } else { + getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr) + } + } + default: + // can't walk any further, so create leaf + n := LeafNode{path, mv} + *l = append(*l, n) + } +} + +// LeafPaths - all paths that terminate in LeafNode values. +func (mv Map) LeafPaths(no_attr ...bool) []string { + ln := mv.LeafNodes() + ss := make([]string, len(ln)) + for i := 0; i < len(ln); i++ { + ss[i] = ln[i].Path + } + return ss +} + +// LeafValues - all terminal values in the Map. +func (mv Map) LeafValues(no_attr ...bool) []interface{} { + ln := mv.LeafNodes() + vv := make([]interface{}, len(ln)) + for i := 0; i < len(ln); i++ { + vv[i] = ln[i].Value + } + return vv +} + +// ====================== utilities ====================== + +// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I +var useDotNotation bool + +// LeafUseDotNotation sets a flag that list members in LeafNode paths +// should be identified using ".N" syntax rather than the default "[N]" +// syntax. 
Calling LeafUseDotNotation with no arguments toggles the +// flag on/off; otherwise, the argument sets the flag value 'true'/'false'. +func LeafUseDotNotation(b ...bool) { + if len(b) == 0 { + useDotNotation = !useDotNotation + return + } + useDotNotation = b[0] +} diff --git a/vendor/github.com/clbanning/mxj/misc.go b/vendor/github.com/clbanning/mxj/misc.go new file mode 100644 index 0000000..5b4fab2 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/misc.go @@ -0,0 +1,86 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// misc.go - mimic functions (+others) called out in: +// https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ +// Primarily these methods let you retrive XML structure information. + +package mxj + +import ( + "fmt" + "sort" + "strings" +) + +// Return the root element of the Map. If there is not a single key in Map, +// then an error is returned. +func (mv Map) Root() (string, error) { + mm := map[string]interface{}(mv) + if len(mm) != 1 { + return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm)) + } + for k, _ := range mm { + return k, nil + } + return "", nil +} + +// If the path is an element with sub-elements, return a list of the sub-element +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are prefixed with +// '-', a hyphen, are considered attributes; see m.Attributes(path). +func (mv Map) Elements(path string) ([]string, error) { + e, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch e.(type) { + case map[string]interface{}: + ee := e.(map[string]interface{}) + elems := make([]string, len(ee)) + var i int + for k, _ := range ee { + if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue // skip attributes + } + elems[i] = k + i++ + } + elems = elems[:i] + // alphabetic sort keeps things tidy + sort.Strings(elems) + return elems, nil + } + return nil, fmt.Errorf("no elements for path: %s", path) +} + +// If the path is an element with attributes, return a list of the attribute +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are not prefixed with +// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the +// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then +// there are no identifiable attributes. +func (mv Map) Attributes(path string) ([]string, error) { + a, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch a.(type) { + case map[string]interface{}: + aa := a.(map[string]interface{}) + attrs := make([]string, len(aa)) + var i int + for k, _ := range aa { + if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 { + continue // skip non-attributes + } + attrs[i] = k[len(attrPrefix):] + i++ + } + attrs = attrs[:i] + // alphabetic sort keeps things tidy + sort.Strings(attrs) + return attrs, nil + } + return nil, fmt.Errorf("no attributes for path: %s", path) +} diff --git a/vendor/github.com/clbanning/mxj/mxj.go b/vendor/github.com/clbanning/mxj/mxj.go new file mode 100644 index 0000000..f0592f0 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/mxj.go @@ -0,0 +1,128 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2014 Charles Banning. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "fmt" + "sort" +) + +const ( + Cast = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast) + SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding) +) + +type Map map[string]interface{} + +// Allocate a Map. +func New() Map { + m := make(map[string]interface{}, 0) + return m +} + +// Cast a Map to map[string]interface{} +func (mv Map) Old() map[string]interface{} { + return mv +} + +// Return a copy of mv as a newly allocated Map. If the Map only contains string, +// numeric, map[string]interface{}, and []interface{} values, then it can be thought +// of as a "deep copy." Copying a structure (or structure reference) value is subject +// to the noted restrictions. +// NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags +// then only public fields of the structure are in the new Map - and with +// keys that conform to any encoding tag instructions. The structure itself will +// be represented as a map[string]interface{} value. +func (mv Map) Copy() (Map, error) { + // this is the poor-man's deep copy + // not efficient, but it works + j, jerr := mv.Json() + // must handle, we don't know how mv got built + if jerr != nil { + return nil, jerr + } + return NewMapJson(j) +} + +// --------------- StringIndent ... from x2j.WriteMap ------------- + +// Pretty print a Map. +func (mv Map) StringIndent(offset ...int) string { + return writeMap(map[string]interface{}(mv), true, true, offset...) +} + +// Pretty print a Map without the value type information - just key:value entries. +func (mv Map) StringIndentNoTypeInfo(offset ...int) string { + return writeMap(map[string]interface{}(mv), false, true, offset...) +} + +// writeMap - dumps the map[string]interface{} for examination. +// 'typeInfo' causes value type to be printed. +// 'offset' is initial indentation count; typically: Write(m). +func writeMap(m interface{}, typeInfo, root bool, offset ...int) string { + var indent int + if len(offset) == 1 { + indent = offset[0] + } + + var s string + switch m.(type) { + case []interface{}: + if typeInfo { + s += "[[]interface{}]" + } + for _, v := range m.([]interface{}) { + s += "\n" + for i := 0; i < indent; i++ { + s += " " + } + s += writeMap(v, typeInfo, false, indent+1) + } + case map[string]interface{}: + list := make([][2]string, len(m.(map[string]interface{}))) + var n int + for k, v := range m.(map[string]interface{}) { + list[n][0] = k + list[n][1] = writeMap(v, typeInfo, false, indent+1) + n++ + } + sort.Sort(mapList(list)) + for _, v := range list { + if root { + root = false + } else { + s += "\n" + } + for i := 0; i < indent; i++ { + s += " " + } + s += v[0] + " : " + v[1] + } + default: + if typeInfo { + s += fmt.Sprintf("[%T] %+v", m, m) + } else { + s += fmt.Sprintf("%+v", m) + } + } + return s +} + +// ======================== utility =============== + +type mapList [][2]string + +func (ml mapList) Len() int { + return len(ml) +} + +func (ml mapList) Swap(i, j int) { + ml[i], ml[j] = ml[j], ml[i] +} + +func (ml mapList) Less(i, j int) bool { + return ml[i][0] <= ml[j][0] +} diff --git a/vendor/github.com/clbanning/mxj/newmap.go b/vendor/github.com/clbanning/mxj/newmap.go new file mode 100644 index 0000000..b293949 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/newmap.go @@ -0,0 +1,184 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. 
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file

+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+// is converted into an array if necessary.

+package mxj

+import (
+	"errors"
+	"strings"
+)

+// (Map)NewMap - create a new Map from data in the current Map.
+// 'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+// should be the value for 'newKey' in the returned Map.
+//	- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//	- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//	- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//	- "oldKey:" and ":newKey" are invalid keypair values
+//	- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//	  "null" is not supported unless it is the current Map.
+//	- see newmap_test.go for several syntax examples
+//	- mv.NewMap() == mxj.New()
+//
+// NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc.
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		// trim any surrounding whitespace so the keys compare cleanly
+		oldKey = strings.TrimSpace(oldKey)
+		newKey = strings.TrimSpace(newKey)
+		if i := strings.Index(newKey, "*"); i > -1 {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if i := strings.Index(newKey, "["); i > -1 {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}

+// navigate 'n' to end of path and add val
+func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
+	// newVal - either singleton or array
+	var newVal interface{}
+	if len(val) == 1 {
+		newVal = val[0] // is type interface{}
+	} else {
+		newVal = interface{}(val)
+	}
+
+	// walk to the position of interest, create it if necessary
+	m := (*n) // initialize map walker
+	var k string // key for
m + lp := len(path) - 1 // when to stop looking + for i := 0; i < len(path); i++ { + k = path[i] + if i == lp { + break + } + var nm map[string]interface{} // holds position of next-map + switch m[k].(type) { + case nil: // need a map for next node in path, so go there + nm = make(map[string]interface{}, 0) + m[k] = interface{}(nm) + m = m[k].(map[string]interface{}) + case map[string]interface{}: + // OK - got somewhere to walk to, go there + m = m[k].(map[string]interface{}) + case []interface{}: + // add a map and nm points to new map unless there's already + // a map in the array, then nm points there + // The placement of the next value in the array is dependent + // on the sequence of members - could land on a map or a nil + // value first. TODO: how to test this. + a := make([]interface{}, 0) + var foundmap bool + for _, vv := range m[k].([]interface{}) { + switch vv.(type) { + case nil: // doesn't appear that this occurs, need a test case + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + foundmap = true + case map[string]interface{}: + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = vv.(map[string]interface{}) + a = append(a, vv) + foundmap = true + default: + a = append(a, vv) + } + } + // no map found in array + if !foundmap { + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + } + m[k] = interface{}(a) // must insert in map + m = nm + default: // it's a string, float, bool, etc. + aa := make([]interface{}, 0) + nm = make(map[string]interface{}, 0) + aa = append(aa, m[k], nm) + m[k] = interface{}(aa) + m = nm + } + } + + // value is nil, array or a singleton of some kind + // initially m.(type) == map[string]interface{} + v := m[k] + switch v.(type) { + case nil: // initialized + m[k] = newVal + case []interface{}: + a := m[k].([]interface{}) + a = append(a, newVal) + m[k] = interface{}(a) + default: // v exists:string, float64, bool, map[string]interface, etc. + a := make([]interface{}, 0) + a = append(a, v, newVal) + m[k] = interface{}(a) + } +} diff --git a/vendor/github.com/clbanning/mxj/readme.md b/vendor/github.com/clbanning/mxj/readme.md new file mode 100644 index 0000000..6bb21dc --- /dev/null +++ b/vendor/github.com/clbanning/mxj/readme.md @@ -0,0 +1,179 @@ +
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
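+As a quick orientation, a minimal sketch of the XML-to-JSON round trip (the XML value is
+illustrative; error handling shown once):
+<pre>data := []byte(`<doc><tag attr="1">value</tag></doc>`)
+mv, err := NewMapXml(data) // decode the XML into a Map
+if err != nil {
+	// handle error
+}
+jsonVal, err := mv.Json() // encode the Map as JSON
+// jsonVal is something like: {"doc":{"tag":{"-attr":"1","#text":"value"}}}</pre>
+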
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.
+I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that
+could be deployed on a Raspberry Pi. Now the decoder is comparable to the stdlib JSON-to-map[string]interface{}
+decoder in terms of its additional processing overhead relative to decoding to a structure value. As shown by:
+
+	BenchmarkNewMapXml-4          100000  18043 ns/op
+	BenchmarkNewStructXml-4       100000  14892 ns/op
+	BenchmarkNewMapJson-4         300000   4633 ns/op
+	BenchmarkNewStructJson-4      300000   3427 ns/op
+	BenchmarkNewMapXmlBooks-4      20000  82850 ns/op
+	BenchmarkNewStructXmlBooks-4   20000  67822 ns/op
+	BenchmarkNewMapJsonBooks-4    100000  17222 ns/op
+	BenchmarkNewStructJsonBooks-4 100000  15309 ns/op
+
+<h4>Notices</h4>
+
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
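+
+For example, a sketch of draining a multi-doc stream with repeated calls (assumes 'f' is an
+os.File positioned at a stream of XML docs; NewMapXmlReader returns io.EOF at end of stream):
+<pre>for {
+	mv, err := NewMapXmlReader(f)
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		// handle error
+	}
+	// ... process mv
+}</pre>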
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
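+
+A sketch of the handler signatures (handler names are illustrative); returning 'false' from
+either handler stops the reader loop:
+<pre>mapHandler := func(m Map) bool {
+	// process one decoded doc
+	return true // 'false' stops processing
+}
+errHandler := func(err error) bool {
+	// inspect or log the decoding error
+	return true // 'false' stops processing and returns the error
+}
+err := HandleXmlReader(xmlReader, mapHandler, errHandler)</pre>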
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
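+
+For instance, a minimal sketch of mv.Struct() (the struct type is hypothetical; mv.Struct()
+applies json.Unmarshal encoding rules, so 'json' tags work):
+<pre>type Msg struct {
+	Name string `json:"name"`
+}
+var msg Msg
+if err := mv.Struct(&msg); err != nil {
+	// handle error
+}</pre>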
+ +
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then:
+<pre>paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)</pre>
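+
+A short sketch of path-based extraction (the XML value is illustrative):
+<pre>mv, _ := NewMapXml([]byte(`<doc><books><book><title>A</title></book><book><title>B</title></book></books></doc>`))
+titles, err := mv.ValuesForPath("doc.books.book.title")
+// titles: []interface{}{"A", "B"}; the wildcard path "doc.*.book.title" finds the same values</pre>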
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
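+
+A worked sketch (keys and values are illustrative) - dot-notation on both sides of ':' can
+relocate a buried value in the new Map:
+<pre>mv := Map{"doc": map[string]interface{}{"meta": map[string]interface{}{"id": "123"}}}
+newMap, err := mv.NewMap("doc.meta.id:record.id")
+// newMap: {"record":{"id":"123"}}</pre>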
+ +
+<h4>Usage</h4>
+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation. (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved:
+      - `<ns:key>something</ns:key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64. If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
+
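+To make the NewMapXml() conventions above concrete, a small sketch (the XML value is illustrative):
+<pre>mv, _ := NewMapXml([]byte(`<msg id="1">hello</msg>`))
+// mv: Map{"msg":map[string]interface{}{"-id":"1", "#text":"hello"}}</pre>
+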
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+     which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+     randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+     Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+     mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+     working with the Map representation.
+
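+A one-line sketch of the 'nil' encoding convention (the key names are illustrative):
+<pre>x, _ := Map{"doc": map[string]interface{}{"tag": nil}}.Xml()
+// x: <doc><tag/></doc></pre>
+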
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence in which map elements are retrieved, the tests have been
+written for visual verification in most cases. One advantage is that you can easily use the
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values. This is easily done using `json.Unmarshal` from the
+standard Go libraries. Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses them into `map[string]interface{}` values and then reuses
+all the JSON-based code. The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values. As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value. Thus, everything was refactored into the mxj package.
+
diff --git a/vendor/github.com/clbanning/mxj/remove.go b/vendor/github.com/clbanning/mxj/remove.go
new file mode 100644
index 0000000..8362ab1
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/remove.go
@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Removes the path.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// returns the last key of the path.
+// lastKey("a.b.c") returns "c"
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// returns the path without the last key
+// parentPath("a.b.c") returns "a.b"
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}
diff --git a/vendor/github.com/clbanning/mxj/rename.go b/vendor/github.com/clbanning/mxj/rename.go
new file mode 100644
index 0000000..e95a963
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/rename.go
@@ -0,0 +1,54 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps. It doesn't work for cases when the key is buried in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	if !mv.Exists(path) {
+		return errors.New("RenameKey: path not found: " + path)
+	}
+	if mv.Exists(parentPath(path) + "."
+ newName) { + return errors.New("RenameKey: key already exists: " + newName) + } + + m := map[string]interface{}(mv) + return renameKey(m, path, newName) +} + +func renameKey(m interface{}, path string, newName string) error { + val, err := prevValueByPath(m, path) + if err != nil { + return err + } + + oldName := lastKey(path) + val[newName] = val[oldName] + delete(val, oldName) + + return nil +} + +// returns a value which contains a last key in the path +// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3} +func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) { + keys := strings.Split(path, ".") + + switch mValue := m.(type) { + case map[string]interface{}: + for key, value := range mValue { + if key == keys[0] { + if len(keys) == 1 { + return mValue, nil + } else { + // keep looking for the full path to the key + return prevValueByPath(value, strings.Join(keys[1:], ".")) + } + } + } + } + return nil, errors.New("prevValueByPath: didn't find path – " + path) +} diff --git a/vendor/github.com/clbanning/mxj/set.go b/vendor/github.com/clbanning/mxj/set.go new file mode 100644 index 0000000..a297fc3 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/set.go @@ -0,0 +1,26 @@ +package mxj + +import ( + "strings" +) + +// Sets the value for the path +func (mv Map) SetValueForPath(value interface{}, path string) error { + pathAry := strings.Split(path, ".") + parentPathAry := pathAry[0 : len(pathAry)-1] + parentPath := strings.Join(parentPathAry, ".") + + val, err := mv.ValueForPath(parentPath) + if err != nil { + return err + } + if val == nil { + return nil // we just ignore the request if there's no val + } + + key := pathAry[len(pathAry)-1] + cVal := val.(map[string]interface{}) + cVal[key] = value + + return nil +} diff --git a/vendor/github.com/clbanning/mxj/setfieldsep.go b/vendor/github.com/clbanning/mxj/setfieldsep.go new file mode 100644 index 0000000..b70715e --- /dev/null +++ b/vendor/github.com/clbanning/mxj/setfieldsep.go @@ -0,0 +1,20 @@ +package mxj + +// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862 +var fieldSep string = ":" + +// SetFieldSeparator changes the default field separator, ":", for the +// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments +// in mv.ValuesForKey and mv.ValuesForPath. +// +// E.g., if the newVal value is "http://blah/blah", setting the field separator +// to "|" will allow the newVal specification, "|http://blah/blah" to parse +// properly. If called with no argument or an empty string value, the field +// separator is set to the default, ":". +func SetFieldSeparator(s ...string) { + if len(s) == 0 || s[0] == "" { + fieldSep = ":" // the default + return + } + fieldSep = s[0] +} diff --git a/vendor/github.com/clbanning/mxj/songtext.xml b/vendor/github.com/clbanning/mxj/songtext.xml new file mode 100644 index 0000000..8c0f2be --- /dev/null +++ b/vendor/github.com/clbanning/mxj/songtext.xml @@ -0,0 +1,29 @@ + + help me! 
+
+
+
+	Henry was a renegade
+	Didn't like to play it safe
+	One component at a time
+	There's got to be a better way
+	Oh, people came from miles around
+	Searching for a steady job
+	Welcome to the Motor Town
+	Booming like an atom bomb
+
+
+	Oh, Henry was the end of the story
+	Then everything went wrong
+	And we'll return it to its former glory
+	But it just takes so long
+
+
+
+	It's going to take a long time
+	It's going to take it, but we'll make it one day
+	It's going to take a long time
+	It's going to take it, but we'll make it one day
+
+
+
diff --git a/vendor/github.com/clbanning/mxj/strict.go b/vendor/github.com/clbanning/mxj/strict.go
new file mode 100644
index 0000000..1e76956
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/strict.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values. This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used. By default CustomDecoder
+// is nil. If CustomDecoder != nil, then mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copies over public attributes from CustomDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+
diff --git a/vendor/github.com/clbanning/mxj/struct.go b/vendor/github.com/clbanning/mxj/struct.go
new file mode 100644
index 0000000..9be636c
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/struct.go
@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure. Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alternatively uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+// json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}
diff --git a/vendor/github.com/clbanning/mxj/updatevalues.go b/vendor/github.com/clbanning/mxj/updatevalues.go
new file mode 100644
index 0000000..46779f4
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/updatevalues.go
@@ -0,0 +1,256 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	         or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//	          The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//	          If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//	          exclusion criteria - e.g., "!author:William T. Gaddis".
+//
+//	NOTES:
+//	   1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//	   2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//	   3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol,
+//	      perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+ if err != nil { + return 0, err + } + } + + // extract key and value from newVal + var key string + var val interface{} + switch newVal.(type) { + case map[string]interface{}, Map: + switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec) + case Map: + newVal = newVal.(Map).Old() + } + if len(newVal.(map[string]interface{})) != 1 { + return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal) + } + for key, val = range newVal.(map[string]interface{}) { + } + case string: // split it as a key:value pair + ss := strings.Split(newVal.(string), fieldSep) + n := len(ss) + if n < 2 || n > 3 { + return 0, fmt.Errorf("unknown newVal spec - %+v", newVal) + } + key = ss[0] + if n == 2 { + val = interface{}(ss[1]) + } else if n == 3 { + switch ss[2] { + case "bool", "boolean": + nv, err := strconv.ParseBool(ss[1]) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal) + } + val = interface{}(nv) + case "num", "numeric", "float", "int": + nv, err := strconv.ParseFloat(ss[1], 64) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal) + } + val = interface{}(nv) + default: + return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal) + } + } + default: + return 0, fmt.Errorf("invalid newVal type - %+v", newVal) + } + + // parse path + keys := strings.Split(path, ".") + + var count int + updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count) + + return count, nil +} + +// navigate the path +func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) { + // ----- at end node: looking at possible node to get 'key' ---- + if len(keys) == 1 { + updateValue(key, value, m, keys[0], subkeys, cnt) + return + } + + // ----- here we are navigating the path thru the penultimate node -------- + // key of interest is keys[0] - the next in the path + switch keys[0] { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + default: + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + } + } + } + } +} + +// change value if key and subkeys are present +func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) { + // there are two possible options for the value of 'keys0': map[string]interface, []interface{} + // and 'key' is a key in the map or is a key in a map in a list. 
+ switch m.(type) { + case map[string]interface{}: // gotta have the last key + if keys0 == "*" { + for k := range m.(map[string]interface{}) { + updateValue(key, value, m, k, subkeys, cnt) + } + return + } + endVal, _ := m.(map[string]interface{})[keys0] + + // if newV key is the end of path, replace the value for path-end + // may be []interface{} - means replace just an entry w/ subkeys + // otherwise replace the keys0 value if subkeys are there + // NOTE: this will replace the subkeys, also + if key == keys0 { + switch endVal.(type) { + case map[string]interface{}: + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + case []interface{}: + // without subkeys can't select list member to modify + // so key:value spec is it ... + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + break + } + nv := make([]interface{}, 0) + var valmodified bool + for _, v := range endVal.([]interface{}) { + // check entry subkeys + if hasSubKeys(v, subkeys) { + // replace v with value + nv = append(nv, value) + valmodified = true + (*cnt)++ + continue + } + nv = append(nv, v) + } + if valmodified { + (m.(map[string]interface{}))[keys0] = interface{}(nv) + } + default: // anything else is a strict replacement + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + } + return + } + + // so value is for an element of endVal + // if endVal is a map then 'key' must be there w/ subkeys + // if endVal is a list then 'key' must be in a list member w/ subkeys + switch endVal.(type) { + case map[string]interface{}: + if !hasSubKeys(endVal, subkeys) { + return + } + if _, ok := (endVal.(map[string]interface{}))[key]; ok { + (endVal.(map[string]interface{}))[key] = value + (*cnt)++ + } + case []interface{}: // keys0 points to a list, check subkeys + for _, v := range endVal.([]interface{}) { + // got to be a map so we can replace value for 'key' + vv, vok := v.(map[string]interface{}) + if !vok { + continue + } + if _, ok := vv[key]; !ok { + continue + } + if !hasSubKeys(vv, subkeys) { + continue + } + vv[key] = value + (*cnt)++ + } + } + case []interface{}: // key may be in a list member + // don't need to handle keys0 == "*"; we're looking at everything, anyway. + for _, v := range m.([]interface{}) { + // only map values - we're looking for 'key' + mm, ok := v.(map[string]interface{}) + if !ok { + continue + } + if _, ok := mm[key]; !ok { + continue + } + if !hasSubKeys(mm, subkeys) { + continue + } + mm[key] = value + (*cnt)++ + } + } + + // return +} diff --git a/vendor/github.com/clbanning/mxj/xml.go b/vendor/github.com/clbanning/mxj/xml.go new file mode 100644 index 0000000..fac0f1d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/xml.go @@ -0,0 +1,1139 @@ +// Copyright 2012-2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// xml.go - basically the core of X2j for map[string]interface{} values. +// NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter +// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages. + +package mxj + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ------------------- NewMapXml & NewMapXmlReader ... ------------------------- + +// If XmlCharsetReader != nil, it will be used to decode the XML, if required. 
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored; +// set the CustomDecoder attribute instead. +// import ( +// charset "code.google.com/p/go-charset/charset" +// github.com/clbanning/mxj +// ) +// ... +// mxj.XmlCharsetReader = charset.NewReader +// m, merr := mxj.NewMapXml(xmlValue) +var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error) + +// NewMapXml - convert a XML doc into a Map +// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().) +// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible. +// +// Converting XML to JSON is a simple as: +// ... +// mapVal, merr := mxj.NewMapXml(xmlVal) +// if merr != nil { +// // handle error +// } +// jsonVal, jerr := mapVal.Json() +// if jerr != nil { +// // handle error +// } +// +// NOTES: +// 1. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 2. If CoerceKeysToLower() has been called, then all key values will be lower case. +// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + return xmlToMap(xmlVal, r) +} + +// Get next XML doc from an io.Reader as a Map value. Returns Map value. +// NOTES: +// 1. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 2. If CoerceKeysToLower() has been called, then all key values will be lower case. +// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + + // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder + // will wrap it in a bufio.Reader and seek on the file beyond where the + // xml.Decoder parses! + if _, ok := xmlReader.(io.ByteReader); !ok { + xmlReader = myByteReader(xmlReader) // see code at EOF + } + + // build the map + return xmlReaderToMap(xmlReader, r) +} + +// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. +// NOTES: +// 1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte +// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. +// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large +// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body +// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. +// 2. The 'raw' return value may be larger than the XML text value. +// 3. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 4. If CoerceKeysToLower() has been called, then all key values will be lower case. +// 5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. 
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + // create TeeReader so we can retrieve raw XML + buf := make([]byte, 0) + wb := bytes.NewBuffer(buf) + trdr := myTeeReader(xmlReader, wb) // see code at EOF + + m, err := xmlReaderToMap(trdr, r) + + // retrieve the raw XML that was decoded + b := wb.Bytes() + + if err != nil { + return nil, b, err + } + + return m, b, nil +} + +// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value +func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { + // parse the Reader + p := xml.NewDecoder(rdr) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlToMapParser("", nil, p, r) +} + +// xmlToMap - convert a XML doc into map[string]interface{} value +func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) { + b := bytes.NewReader(doc) + p := xml.NewDecoder(b) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlToMapParser("", nil, p, r) +} + +// ===================================== where the work happens ============================= + +// PrependAttrWithHyphen. Prepend attribute tags with a hyphen. +// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.) +// Note: +// If 'false', unmarshaling and marshaling is not symmetric. Attributes will be +// marshal'd as attr and may be part of a list. +func PrependAttrWithHyphen(v bool) { + if v { + attrPrefix = "-" + lenAttrPrefix = len(attrPrefix) + return + } + attrPrefix = "" + lenAttrPrefix = len(attrPrefix) +} + +// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com. +var includeTagSeqNum bool + +// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting +// its position when parsed. This is of limited usefulness, since list values cannot +// be tagged with "_seq" without changing their depth in the Map. +// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what +// you get. +/* + + + + + hello + + + parses as: + + { + Obj:{ + "-c":"la", + "-h":"da", + "-x":"dee", + "intObj":[ + { + "-id"="3", + "_seq":"0" // if mxj.Cast is passed, then: "_seq":0 + }, + { + "-id"="2", + "_seq":"2" + }], + "intObj1":{ + "-id":"1", + "_seq":"1" + }, + "StrObj":{ + "#text":"hello", // simple element value gets "#text" tag + "_seq":"3" + } + } + } +*/ +func IncludeTagSeqNum(b bool) { + includeTagSeqNum = b +} + +// all keys will be "lower case" +var lowerCase bool + +// Coerce all tag values to keys in lower case. This is useful if you've got sources with variable +// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec +// in lower case. +// CoerceKeysToLower() will toggle the coercion flag true|false - on|off +// CoerceKeysToLower(true|false) will set the coercion flag on|off +// +// NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as +// the associated HandleXmlReader and HandleXmlReaderRaw. +func CoerceKeysToLower(b ...bool) { + if len(b) == 0 { + lowerCase = !lowerCase + } else if len(b) == 1 { + lowerCase = b[0] + } +} + +// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels. +// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "", +// and adding a SetAttrPrefix(s string) function. 
+ +var attrPrefix string = `-` // the default +var lenAttrPrefix int = 1 // the default + +// SetAttrPrefix changes the default, "-", to the specified value, s. +// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false). +// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.) +func SetAttrPrefix(s string) { + attrPrefix = s + lenAttrPrefix = len(attrPrefix) +} + +// 18jan17: Allows user to specify if the map keys should be in snake case instead +// of the default hyphenated notation. +var snakeCaseKeys bool + +// CoerceKeysToSnakeCase changes the default, false, to the specified value, b. +// Note: the attribute prefix will be a hyphen, '-', or what ever string value has +// been specified using SetAttrPrefix. +func CoerceKeysToSnakeCase(b ...bool) { + if len(b) == 0 { + snakeCaseKeys = !snakeCaseKeys + } else if len(b) == 1 { + snakeCaseKeys = b[0] + } +} + +// 05feb17: support processing XMPP streams (issue #36) +var handleXMPPStreamTag bool + +// HandleXMPPStreamTag causes decoder to parse XMPP elements. +// If called with no argument, XMPP stream element handling is toggled on/off. +// (See xmppStream_test.go for example.) +// If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream" +// element will be returned as: +// map["stream"]interface{}{map[-]interface{}}. +// If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream" +// element will be returned as: +// map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}} +// where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.) +func HandleXMPPStreamTag(b ...bool) { + if len(b) == 0 { + handleXMPPStreamTag = !handleXMPPStreamTag + } else if len(b) == 1 { + handleXMPPStreamTag = b[0] + } +} + +// 21jan18 - decode all values as map["#text":value] (issue #56) +var decodeSimpleValuesAsMap bool + +// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":]. +// If called with no argument, the decoding is toggled on/off. +// +// By default the NewMapXml functions decode simple values without attributes as +// map[:]. This function causes simple values without attributes to be +// decoded the same as simple values with attributes - map[:map["#text":]]. +func DecodeSimpleValuesAsMap(b ...bool) { + if len(b) == 0 { + decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap + } else if len(b) == 1 { + decodeSimpleValuesAsMap = b[0] + } +} + +// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly. +// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one. +// We've removed the intermediate *node tree with the allocation and subsequent rescanning. +func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if lowerCase { + skey = strings.ToLower(skey) + } + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. + // Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value. + var n, na map[string]interface{} + var seq int // for includeTagSeqNum + + // Allocate maps and load attributes, if any. + // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through + // to get StartElement then recurse with skey==xml.StartElement.Name.Local + // where we begin allocating map[string]interface{} values 'n' and 'na'. 
+ if skey != "" { + n = make(map[string]interface{}) // old n + na = make(map[string]interface{}) // old n.nodes + if len(a) > 0 { + for _, v := range a { + if snakeCaseKeys { + v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) + } + var key string + key = attrPrefix + v.Name.Local + if lowerCase { + key = strings.ToLower(key) + } + na[key] = cast(v.Value, r) + } + } + } + // Return XMPP message. + if handleXMPPStreamTag && skey == "stream" { + n[skey] = na + return n, nil + } + + for { + t, err := p.Token() + if err != nil { + if err != io.EOF { + return nil, errors.New("xml.Decoder.Token() - " + err.Error()) + } + return nil, err + } + switch t.(type) { + case xml.StartElement: + tt := t.(xml.StartElement) + + // First call to xmlToMapParser() doesn't pass xml.StartElement - the map key. + // So when the loop is first entered, the first token is the root tag along + // with any attributes, which we process here. + // + // Subsequent calls to xmlToMapParser() will pass in tag+attributes for + // processing before getting the next token which is the element value, + // which is done above. + if skey == "" { + return xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + } + + // If not initializing the map, parse the element. + // len(nn) == 1, necessarily - it is just an 'n'. + nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + if err != nil { + return nil, err + } + + // The nn map[string]interface{} value is a na[nn_key] value. + // We need to see if nn_key already exists - means we're parsing a list. + // This may require converting na[nn_key] value into []interface{} type. + // First, extract the key:val for the map - it's a singleton. + // Note: + // * if CoerceKeysToLower() called, then key will be lower case. + // * if CoerceKeysToSnakeCase() called, then key will be converted to snake case. + var key string + var val interface{} + for key, val = range nn { + break + } + + // IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element. + // In theory, we don't need this if len(na) == 1. But, we don't know what might + // come next - we're only parsing forward. So if you ask for 'includeTagSeqNum' you + // get it on every element. (Personally, I never liked this, but I added it on request + // and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!) + if includeTagSeqNum { + switch val.(type) { + case []interface{}: + // noop - There's no clean way to handle this w/o changing message structure. + case map[string]interface{}: + val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag + seq++ + case interface{}: // a non-nil simple element: string, float64, bool + v := map[string]interface{}{"#text": val} + v["_seq"] = seq + seq++ + val = v + } + } + + // 'na' holding sub-elements of n. + // See if 'key' already exists. + // If 'key' exists, then this is a list, if not just add key:val to na. + if v, ok := na[key]; ok { + var a []interface{} + switch v.(type) { + case []interface{}: + a = v.([]interface{}) + default: // anything else - note: v.(type) != nil + a = []interface{}{v} + } + a = append(a, val) + na[key] = a + } else { + na[key] = val // save it as a singleton + } + case xml.EndElement: + // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. + if len(n) == 0 { + // If len(na)==0 we have an empty element == ""; + // it has no xml.Attr nor xml.CharData. 
+ // Note: in original node-tree parser, val defaulted to ""; + // so we always had the default if len(node.nodes) == 0. + if len(na) > 0 { + n[skey] = na + } else { + n[skey] = "" // empty element + } + } + return n, nil + case xml.CharData: + // clean up possible noise + tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ") + if len(tt) > 0 { + if len(na) > 0 || decodeSimpleValuesAsMap { + na["#text"] = cast(tt, r) + } else if skey != "" { + n[skey] = cast(tt, r) + } else { + // per Adrian (http://www.adrianlungu.com/) catch stray text + // in decoder stream - + // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 + // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get + // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. + continue + } + } + default: + // noop + } + } +} + +var castNanInf bool + +// Cast "Nan", "Inf", "-Inf" XML values to 'float64'. +// By default, these values will be decoded as 'string'. +func CastNanInf(b bool) { + castNanInf = b +} + +// cast - try to cast string values to bool or float64 +func cast(s string, r bool) interface{} { + if r { + // handle nan and inf + if !castNanInf { + switch strings.ToLower(s) { + case "nan", "inf", "-inf": + return s + } + } + + // handle numeric strings ahead of boolean + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + // ParseBool treats "1"==true & "0"==false, we've already scanned those + // values as float64. See if value has 't' or 'f' as initial screen to + // minimize calls to ParseBool; also, see if len(s) < 6. + if len(s) > 0 && len(s) < 6 { + switch s[:1] { + case "t", "T", "f", "F": + if b, err := strconv.ParseBool(s); err == nil { + return b + } + } + } + } + return s +} + +// ------------------ END: NewMapXml & NewMapXmlReader ------------------------- + +// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------ + +const ( + DefaultRootTag = "doc" +) + +var useGoXmlEmptyElemSyntax bool + +// XmlGoEmptyElemSyntax() - rather than . +// Go's encoding/xml package marshals empty XML elements as . By default this package +// encodes empty elements as . If you're marshaling Map values that include structures +// (which are passed to xml.Marshal for encoding), this will let you conform to the standard package. +func XmlGoEmptyElemSyntax() { + useGoXmlEmptyElemSyntax = true +} + +// XmlDefaultEmptyElemSyntax() - rather than . +// Return XML encoding for empty elements to the default package setting. +// Reverses effect of XmlGoEmptyElemSyntax(). +func XmlDefaultEmptyElemSyntax() { + useGoXmlEmptyElemSyntax = false +} + +// Encode a Map as XML. The companion of NewMapXml(). +// The following rules apply. +// - The key label "#text" is treated as the value for a simple element with attributes. +// - Map keys that begin with a hyphen, '-', are interpreted as attributes. +// It is an error if the attribute doesn't have a []byte, string, number, or boolean value. +// - Map value type encoding: +// > string, bool, float64, int, int32, int64, float32: per "%v" formating +// > []bool, []uint8: by casting to string +// > structures, etc.: handed to xml.Marshal() - if there is an error, the element +// value is "UNKNOWN" +// - Elements with only attribute values or are null are terminated using "/>". +// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible. +// Thus, `{ "key":"value" }` encodes as "value". 
+// - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax(). +// The attributes tag=value pairs are alphabetized by "tag". Also, when encoding map[string]interface{} values - +// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted. +func (mv Map) Xml(rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + var err error + s := new(string) + p := new(pretty) // just a stub + + if len(m) == 1 && len(rootTag) == 0 { + for key, value := range m { + // if it an array, see if all values are map[string]interface{} + // we force a new root tag if we'll end up with no key:value in the list + // so: key:[string_val, bool:true] --> string_valtrue + switch value.(type) { + case []interface{}: + for _, v := range value.([]interface{}) { + switch v.(type) { + case map[string]interface{}: // noop + default: // anything else + err = mapToXmlIndent(false, s, DefaultRootTag, m, p) + goto done + } + } + } + err = mapToXmlIndent(false, s, key, value, p) + } + } else if len(rootTag) == 1 { + err = mapToXmlIndent(false, s, rootTag[0], m, p) + } else { + err = mapToXmlIndent(false, s, DefaultRootTag, m, p) + } +done: + return []byte(*s), err +} + +// The following implementation is provided only for symmetry with NewMapXmlReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as XML on the Writer. +// See Xml() for encoding rules. +func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error { + x, err := mv.Xml(rootTag...) + if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// Writes the Map as XML on the Writer. []byte is the raw XML that was written. +// See Xml() for encoding rules. +func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) { + x, err := mv.Xml(rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} + +// Writes the Map as pretty XML on the Writer. +// See Xml() for encoding rules. +func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error { + x, err := mv.XmlIndent(prefix, indent, rootTag...) + if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. +// See Xml() for encoding rules. +func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { + x, err := mv.XmlIndent(prefix, indent, rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} + +// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- + +// -------------- Handle XML stream by processing Map value -------------------- + +// Default poll delay to keep Handler from spinning on an open stream +// like sitting on os.Stdin waiting for imput. +var xhandlerPollInterval = time.Millisecond + +// Bulk process XML using handlers that process a Map value. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. 
+// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapXmlReader(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process XML using handlers that process a Map value and the raw XML. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. +// See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader. +func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapXmlReaderRaw(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// ----------------- END: Handle XML stream by processing Map value -------------- + +// -------- a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ---------- + +// This is a clone of io.TeeReader with the additional method t.ReadByte(). +// Thus, this TeeReader is also an io.ByteReader. +// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written +// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte().. +// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with +// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic. + +type teeReader struct { + r io.Reader + w io.Writer + b []byte +} + +func myTeeReader(r io.Reader, w io.Writer) io.Reader { + b := make([]byte, 1) + return &teeReader{r, w, b} +} + +// need for io.Reader - but we don't use it ... 
+func (t *teeReader) Read(p []byte) (int, error) { + return 0, nil +} + +func (t *teeReader) ReadByte() (byte, error) { + n, err := t.r.Read(t.b) + if n > 0 { + if _, err := t.w.Write(t.b[:1]); err != nil { + return t.b[0], err + } + } + return t.b[0], err +} + +// For use with NewMapXmlReader & NewMapXmlSeqReader. +type byteReader struct { + r io.Reader + b []byte +} + +func myByteReader(r io.Reader) io.Reader { + b := make([]byte, 1) + return &byteReader{r, b} +} + +// Need for io.Reader interface ... +// Needed if reading a malformed http.Request.Body - issue #38. +func (b *byteReader) Read(p []byte) (int, error) { + return b.r.Read(p) +} + +func (b *byteReader) ReadByte() (byte, error) { + _, err := b.r.Read(b.b) + if len(b.b) > 0 { + return b.b[0], err + } + var c byte + return c, err +} + +// ----------------------- END: io.TeeReader hack ----------------------------------- + +// ---------------------- XmlIndent - from j2x package ---------------------------- + +// Encode a map[string]interface{} as a pretty XML string. +// See Xml for encoding rules. +func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + + var err error + s := new(string) + p := new(pretty) + p.indent = indent + p.padding = prefix + + if len(m) == 1 && len(rootTag) == 0 { + // this can extract the key for the single map element + // use it if it isn't a key for a list + for key, value := range m { + if _, ok := value.([]interface{}); ok { + err = mapToXmlIndent(true, s, DefaultRootTag, m, p) + } else { + err = mapToXmlIndent(true, s, key, value, p) + } + } + } else if len(rootTag) == 1 { + err = mapToXmlIndent(true, s, rootTag[0], m, p) + } else { + err = mapToXmlIndent(true, s, DefaultRootTag, m, p) + } + return []byte(*s), err +} + +type pretty struct { + indent string + cnt int + padding string + mapDepth int + start int +} + +func (p *pretty) Indent() { + p.padding += p.indent + p.cnt++ +} + +func (p *pretty) Outdent() { + if p.cnt > 0 { + p.padding = p.padding[:len(p.padding)-len(p.indent)] + p.cnt-- + } +} + +// where the work actually happens +// returns an error if an attribute is not atomic +func mapToXmlIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error { + var endTag bool + var isSimple bool + var elen int + p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} + + // per issue #48, 18apr18 - try and coerce maps to map[string]interface{} + // Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq(). 
+ if reflect.ValueOf(value).Kind() == reflect.Map { + switch value.(type) { + case map[string]interface{}: + default: + val := make(map[string]interface{}) + vv := reflect.ValueOf(value) + keys := vv.MapKeys() + for _, k := range keys { + val[fmt.Sprint(k)] = vv.MapIndex(k).Interface() + } + value = val + } + } + + switch value.(type) { + // special handling of []interface{} values when len(value) == 0 + case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number: + if doIndent { + *s += p.padding + } + *s += `<` + key + } + switch value.(type) { + case map[string]interface{}: + vv := value.(map[string]interface{}) + lenvv := len(vv) + // scan out attributes - attribute keys have prepended attrPrefix + attrlist := make([][2]string, len(vv)) + var n int + var ss string + for k, v := range vv { + if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { + switch v.(type) { + case string: + if xmlEscapeChars { + ss = escapeChars(v.(string)) + } else { + ss = v.(string) + } + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = ss + case float64, bool, int, int32, int64, float32, json.Number: + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = fmt.Sprintf("%v", v) + case []byte: + if xmlEscapeChars { + ss = escapeChars(string(v.([]byte))) + } else { + ss = string(v.([]byte)) + } + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = ss + default: + return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v) + } + n++ + } + } + if n > 0 { + attrlist = attrlist[:n] + sort.Sort(attrList(attrlist)) + for _, v := range attrlist { + *s += ` ` + v[0] + `="` + v[1] + `"` + } + } + // only attributes? + if n == lenvv { + if useGoXmlEmptyElemSyntax { + *s += `" + } else { + *s += `/>` + } + break + } + + // simple element? Note: '#text" is an invalid XML tag. + if v, ok := vv["#text"]; ok && n+1 == lenvv { + switch v.(type) { + case string: + if xmlEscapeChars { + v = escapeChars(v.(string)) + } else { + v = v.(string) + } + case []byte: + if xmlEscapeChars { + v = escapeChars(string(v.([]byte))) + } + } + *s += ">" + fmt.Sprintf("%v", v) + endTag = true + elen = 1 + isSimple = true + break + } else if ok { + // Handle edge case where simple element with attributes + // is unmarshal'd using NewMapXml() where attribute prefix + // has been set to "". + // TODO(clb): should probably scan all keys for invalid chars. + return fmt.Errorf("invalid attribute key label: #text - due to attributes not being prefixed") + } + + // close tag with possible attributes + *s += ">" + if doIndent { + *s += "\n" + } + // something more complex + p.mapDepth++ + // extract the map k:v pairs and sort on key + elemlist := make([][2]interface{}, len(vv)) + n = 0 + for k, v := range vv { + if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { + continue + } + elemlist[n][0] = k + elemlist[n][1] = v + n++ + } + elemlist = elemlist[:n] + sort.Sort(elemList(elemlist)) + var i int + for _, v := range elemlist { + switch v[1].(type) { + case []interface{}: + default: + if i == 0 && doIndent { + p.Indent() + } + } + i++ + if err := mapToXmlIndent(doIndent, s, v[0].(string), v[1], p); err != nil { + return err + } + switch v[1].(type) { + case []interface{}: // handled in []interface{} case + default: + if doIndent { + p.Outdent() + } + } + i-- + } + p.mapDepth-- + endTag = true + elen = 1 // we do have some content ... 
+	case []interface{}:
+		// special case - found during implementing Issue #23
+		if len(value.([]interface{})) == 0 {
+			if doIndent {
+				*s += p.padding + p.indent
+			}
+			*s += "<" + key
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case []string:
+		// This was added by https://github.com/slotix ... not a type that
+		// would be encountered if mv generated from NewMapXml, NewMapJson.
+		// Could be encountered in AnyXml(), so we'll let it stay, though
+		// it should be merged with case []interface{}, above.
+		// Quick fix for the []string type:
+		// []string should be treated exactly as []interface{}.
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				*s += p.padding + p.indent
+			}
+			*s += "<" + key
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			*s += ">" + v
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				*s += ">"
+			}
+			*s += `</` + key + `>`
+		} else {
+			*s += `/>`
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}
diff --git a/vendor/github.com/clbanning/mxj/xmlseq.go b/vendor/github.com/clbanning/mxj/xmlseq.go
new file mode 100644
index 0000000..6be73ae
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/xmlseq.go
@@ -0,0 +1,828 @@
+// Copyright 2012-2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+// The xml.Decoder.RawToken method is used to parse the XML, so there is no checking for appropriate xml.EndElement values;
+// thus, it is assumed that the XML is valid.
+//
+// NewMapXmlSeq - convert a XML doc into a Map with elements id'd with decoding sequence int - #seq.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with mv.XmlSeq() / mv.XmlSeqIndent().
+// • attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+// • all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+// • lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//   include a "#seq" k:v pair based on the sequence in which they are decoded. Thus, XML like:
+//       <doc>
+//          <ltag>value 1</ltag>
+//          <newtag>value 2</newtag>
+//          <ltag>value 3</ltag>
+//       </doc>
+//   is decoded as:
+//     doc :
+//       ltag :[[]interface{}]
+//         [item: 0]
+//           #seq :[int] 0
+//           #text :[string] value 1
+//         [item: 1]
+//           #seq :[int] 2
+//           #text :[string] value 3
+//       newtag :
+//         #seq :[int] 1
+//         #text :[string] value 2
+//   It will encode in proper sequence even though the Map representation merges all "ltag" elements in an array.
+// • comments - "<!--comment-->" - are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+// • directives - "<!directive>" - are decoded as map["#directive"]map["#text"]"directive_text" with a "#seq" k:v pair.
+// • process instructions - "<?target inst?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//   is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+// • comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//   map[string]interface{} and the error value 'NoRoot'.
+// • note: "<![CDATA[" syntax is lost in the xml.Decode parser and is not handled here, either.
+//
+// NAME SPACES:
+// 1. Keys in the Map value that are parsed from a <name space prefix>:<local name> tag preserve the
+//    ":" notation rather than stripping it as with NewMapXml().
+// 2. Attribute keys for name space prefix declarations preserve "xmlns:" notation.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+//
+// Get the next XML doc from an io.Reader as a Map value.
+// NOTES:
+// 1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//    extraneous xml.CharData will be ignored unless io.EOF is reached first.
+// 2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//    re-encode the message in its original structure.
+// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
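Taken together, the decoding rules above make NewMapXmlSeq / XmlSeq a structure-preserving round trip. A minimal sketch of that round trip, before the reader-based variants below (not part of the patch; it assumes the vendored package is importable as github.com/clbanning/mxj):

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	// <b> precedes <a>, so it receives the lower #seq value on decode.
	doc := []byte(`<doc><b attr="1">two</b><a>one</a></doc>`)

	m, err := mxj.NewMapXmlSeq(doc)
	if err != nil {
		panic(err)
	}

	// Re-encoding sorts on the injected #seq keys, so <b> is written
	// before <a> even though Go map iteration order is randomized.
	out, err := m.XmlSeq()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <doc><b attr="1">two</b><a>one</a></doc>
}
```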
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + + // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder + // will wrap it in a bufio.Reader and seek on the file beyond where the + // xml.Decoder parses! + if _, ok := xmlReader.(io.ByteReader); !ok { + xmlReader = myByteReader(xmlReader) // see code at EOF + } + + // build the map + return xmlSeqReaderToMap(xmlReader, r) +} + +// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure. +// +// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. +// NOTES: +// 1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte +// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. +// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large +// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body +// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. +// 2. The 'raw' return value may be larger than the XML text value. +// 3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to +// re-encode the message in its original structure. +// 5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + // create TeeReader so we can retrieve raw XML + buf := make([]byte, 0) + wb := bytes.NewBuffer(buf) + trdr := myTeeReader(xmlReader, wb) + + m, err := xmlSeqReaderToMap(trdr, r) + + // retrieve the raw XML that was decoded + b := wb.Bytes() + + // err may be NoRoot + return m, b, err +} + +// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value +func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { + // parse the Reader + p := xml.NewDecoder(rdr) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// xmlSeqToMap - convert a XML doc into map[string]interface{} value +func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) { + b := bytes.NewReader(doc) + p := xml.NewDecoder(b) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// ===================================== where the work happens ============================= + +// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly. +// Add #seq tag value for each element decoded - to be used for Encoding later. +func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. 
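+	// Shape reminder: for <tag a="1">txt</tag>, 'na' ends up roughly as
+	// {"#attr": {"a": {"#text": "1", "#seq": 0}}, "#text": "txt", "#seq": ...}
+	// and is returned to the caller as n["tag"] = na.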
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	// to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	// where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime•hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i}
+				}
+			}
+			na["#attr"] = aa
+		}
+	}
+
+	// Return XMPP <stream:stream> message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//     <list>item 1</list>
+			//     <newtag>item 2</newtag>
+			//     <list>item 3</list>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})["#seq"] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{"#text": val, "#seq": seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
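+			// e.g., a second <item> sibling turns na["item"] from a single
+			// decoded map into a []interface{} holding both elements.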
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "<etag></etag>";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r)
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a Map as XML with elements sorted on #seq. The companion of NewMapXmlSeq().
+// The following rules apply.
+// - The key label "#text" is treated as the value for a simple element with attributes.
+// - The "#seq" key is used to sequence the subelements or attributes but is ignored for writing.
+// - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+// - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+// - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+// - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//   map entries - <?target inst?>.
+// - Value type encoding:
+//     > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//     > []bool, []uint8: by casting to string
+//     > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//       value is "UNKNOWN"
+// - Elements with only attribute values, or that are null, are terminated using "/>" unless XmlGoEmptyElemSyntax() is called.
+// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//   Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv Map) XmlSeq(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw].
+// The names will also provide a key for the number of return arguments.
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as XML on the Writer.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as XML on the Writer. []byte is the raw XML that was written.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a map[string]interface{} as a pretty XML string.
+// See mv.XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + `>`
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + `>`
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ---------------------- the element sort implementation -------------------------
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		iseq = 9999999
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		jseq = 9999999
+	}
+
+	return iseq <= jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlSeqIndent(prefix, indent)
+}
diff --git a/vendor/github.com/deckarep/golang-set/.gitignore b/vendor/github.com/deckarep/golang-set/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE
new file mode 100644
index 0000000..b5768f8
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/LICENSE
@@ -0,0 +1,22 @@
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md
new file mode 100644
index 0000000..c3b50b2
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/README.md
@@ -0,0 +1,95 @@
+[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set)
+[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set)
+[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set)
+
+## golang-set
+
+
+The missing set collection for the Go language.
+Until Go has sets built-in...use this.
+
+Coming from Python, one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
+You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say: simply ignore this repository and carry on. To the rest that find this useful, please contribute in helping me make it better by:
+
+* Helping to make more idiomatic improvements to the code.
+* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
+* Helping to make the unit-tests more robust and kick-ass.
+* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
+* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)
+
+I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
+
+*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
+
+## Features (as of 9/22/2014)
+
+* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
+
+## Features (as of 9/15/2014)
+
+* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
+
+## Features (as of 4/22/2014)
+
+* One common interface to both implementations
+* Two set implementations to choose from
+  * a thread-safe implementation designed for concurrent use
+  * a non-thread-safe implementation designed for performance
+* 75 benchmarks for both implementations
+* 35 unit tests for both implementations
+* 14 concurrent tests for the thread-safe implementation
+
+
+
+Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind,
+however, that the Python set is a built-in type and supports additional features and syntax that make it awesome.
+
+## Examples (but not exhaustive):
+
+```go
+requiredClasses := mapset.NewSet()
+requiredClasses.Add("Cooking")
+requiredClasses.Add("English")
+requiredClasses.Add("Math")
+requiredClasses.Add("Biology")
+
+scienceSlice := []interface{}{"Biology", "Chemistry"}
+scienceClasses := mapset.NewSetFromSlice(scienceSlice)
+
+electiveClasses := mapset.NewSet()
+electiveClasses.Add("Welding")
+electiveClasses.Add("Music")
+electiveClasses.Add("Automotive")
+
+bonusClasses := mapset.NewSet()
+bonusClasses.Add("Go Programming")
+bonusClasses.Add("Python Programming")
+
+//Show me all the available classes I can take
+allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
+
+
+//Is cooking considered a science class?
+fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+//Show me all classes that are not science classes, since I hate science.
+fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
+
+//Which science classes are also required classes?
+fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+//How many bonus classes do you offer?
+fmt.Println(bonusClasses.Cardinality()) //2
+
+//Do you have the following classes? Welding, Automotive and English?
+fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
+```
+
+Thanks!
+
+-Ralph
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
+[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)
diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go
new file mode 100644
index 0000000..9dfecad
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/iterator.go
@@ -0,0 +1,58 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+// Iterator defines an iterator over a Set; its C channel can be used to
+// range over the Set's elements.
+type Iterator struct {
+	C    <-chan interface{}
+	stop chan struct{}
+}
+
+// Stop stops the Iterator: no further elements will be received on C, and C
+// will be closed.
+func (i *Iterator) Stop() {
+	// Allows for Stop() to be called multiple times
+	// (close() panics when called on an already closed channel).
+	defer func() {
+		recover()
+	}()
+
+	close(i.stop)
+
+	// Exhaust any remaining elements.
+	for range i.C {
+	}
+}
+
+// newIterator returns a new Iterator instance together with its item and stop channels.
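A hedged usage sketch for the Iterator above, placed here for reference (it assumes the vendored import path github.com/deckarep/golang-set):

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSetFromSlice([]interface{}{"a", "b", "c"})

	it := s.Iterator()
	for elem := range it.C {
		fmt.Println(elem)
		if elem == "b" {
			// Stop closes the stop channel and drains C, so the
			// producing goroutine exits; safe to call more than once.
			it.Stop()
			break
		}
	}
}
```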
+func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) { + itemChan := make(chan interface{}) + stopChan := make(chan struct{}) + return &Iterator{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go new file mode 100644 index 0000000..29eb2e5 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/set.go @@ -0,0 +1,217 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and generic set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set interface { + // Adds an element to the set. Returns whether + // the item was added. + Add(i interface{}) bool + + // Returns the number of elements in the set. + Cardinality() int + + // Removes all elements from the set, leaving + // the empty set. + Clear() + + // Returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set + + // Returns whether the given items + // are all in the set. + Contains(i ...interface{}) bool + + // Returns the difference between this set + // and other. The returned set will contain + // all elements of this set that are not also + // elements of other. + // + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set) Set + + // Determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. 
+	//
+	// Note that the argument to Equal must be
+	// of the same type as the receiver of the
+	// method. Otherwise, Equal will panic.
+	Equal(other Set) bool
+
+	// Returns a new set containing only the elements
+	// that exist in both sets.
+	//
+	// Note that the argument to Intersect
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Intersect will
+	// panic.
+	Intersect(other Set) Set
+
+	// Determines if every element in this set is in
+	// the other set but the two sets are not equal.
+	//
+	// Note that the argument to IsProperSubset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsProperSubset
+	// will panic.
+	IsProperSubset(other Set) bool
+
+	// Determines if every element in the other set
+	// is in this set but the two sets are not
+	// equal.
+	//
+	// Note that the argument to IsProperSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsProperSuperset will
+	// panic.
+	IsProperSuperset(other Set) bool
+
+	// Determines if every element in this set is in
+	// the other set.
+	//
+	// Note that the argument to IsSubset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSubset will
+	// panic.
+	IsSubset(other Set) bool
+
+	// Determines if every element in the other set
+	// is in this set.
+	//
+	// Note that the argument to IsSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSuperset will
+	// panic.
+	IsSuperset(other Set) bool
+
+	// Iterates over the elements and executes the passed func against each element.
+	// If the passed func returns true, iteration stops at that point.
+	Each(func(interface{}) bool)
+
+	// Returns a channel of elements that you can
+	// range over.
+	Iter() <-chan interface{}
+
+	// Returns an Iterator object that you can
+	// use to range over the set.
+	Iterator() *Iterator
+
+	// Remove a single element from the set.
+	Remove(i interface{})
+
+	// Provides a convenient string representation
+	// of the current state of the set.
+	String() string
+
+	// Returns a new set with all elements which are
+	// in either this set or the other set but not in both.
+	//
+	// Note that the argument to SymmetricDifference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, SymmetricDifference
+	// will panic.
+	SymmetricDifference(other Set) Set
+
+	// Returns a new set with all elements in both sets.
+	//
+	// Note that the argument to Union must be of the
+	// same type as the receiver of the method.
+	// Otherwise, Union will panic.
+	Union(other Set) Set
+
+	// Pop removes and returns an arbitrary item from the set.
+	Pop() interface{}
+
+	// Returns all subsets of a given set (Power Set).
+	PowerSet() Set
+
+	// Returns the Cartesian Product of two sets.
+	CartesianProduct(other Set) Set
+
+	// Returns the members of the set as a slice.
+	ToSlice() []interface{}
+}
+
+// NewSet creates and returns a reference to an empty set. Operations
+// on the resulting set are thread-safe.
+func NewSet(s ...interface{}) Set {
+	set := newThreadSafeSet()
+	for _, item := range s {
+		set.Add(item)
+	}
+	return &set
+}
+
+// NewSetWith creates and returns a new set with the given elements.
+// Operations on the resulting set are thread-safe.
+func NewSetWith(elts ...interface{}) Set {
+	return NewSetFromSlice(elts)
+}
+
+// NewSetFromSlice creates and returns a reference to a set from an
+// existing slice. Operations on the resulting set are thread-safe.
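The constructors above come in thread-safe and non-thread-safe flavors; a short sketch of the trade-off (import path as in this vendor tree):

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// Default constructors return the thread-safe implementation.
	safe := mapset.NewSet("a", "b")

	// The non-thread-safe variant trades locking for speed; the caller
	// must guarantee single-goroutine access.
	fast := mapset.NewThreadUnsafeSet()
	fast.Add("a")

	fmt.Println(safe.Contains("a", "b")) // true
	fmt.Println(fast.Cardinality())      // 1
}
```

Note that the binary operations (Union, Difference, etc.) type-assert their argument, so mixing the two implementations in one call panics.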
+func NewSetFromSlice(s []interface{}) Set { + a := NewSet(s...) + return a +} + +// NewThreadUnsafeSet creates and returns a reference to an empty set. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSet() Set { + set := newThreadUnsafeSet() + return &set +} + +// NewThreadUnsafeSetFromSlice creates and returns a reference to a +// set from an existing slice. Operations on the resulting set are +// not thread-safe. +func NewThreadUnsafeSetFromSlice(s []interface{}) Set { + a := NewThreadUnsafeSet() + for _, item := range s { + a.Add(item) + } + return a +} diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go new file mode 100644 index 0000000..269b4ab --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadsafe.go @@ -0,0 +1,283 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet struct { + s threadUnsafeSet + sync.RWMutex +} + +func newThreadSafeSet() threadSafeSet { + return threadSafeSet{s: newThreadUnsafeSet()} +} + +func (set *threadSafeSet) Add(i interface{}) bool { + set.Lock() + ret := set.s.Add(i) + set.Unlock() + return ret +} + +func (set *threadSafeSet) Contains(i ...interface{}) bool { + set.RLock() + ret := set.s.Contains(i...) 
+ set.RUnlock() + return ret +} + +func (set *threadSafeSet) IsSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.IsSubset(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) IsProperSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + defer set.RUnlock() + o.RLock() + defer o.RUnlock() + + return set.s.IsProperSubset(&o.s) +} + +func (set *threadSafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadSafeSet) IsProperSuperset(other Set) bool { + return other.IsProperSubset(set) +} + +func (set *threadSafeSet) Union(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeUnion} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Intersect(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeIntersection} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Difference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) SymmetricDifference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clear() { + set.Lock() + set.s = newThreadUnsafeSet() + set.Unlock() +} + +func (set *threadSafeSet) Remove(i interface{}) { + set.Lock() + delete(set.s, i) + set.Unlock() +} + +func (set *threadSafeSet) Cardinality() int { + set.RLock() + defer set.RUnlock() + return len(set.s) +} + +func (set *threadSafeSet) Each(cb func(interface{}) bool) { + set.RLock() + for elem := range set.s { + if cb(elem) { + break + } + } + set.RUnlock() +} + +func (set *threadSafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + set.RLock() + + for elem := range set.s { + ch <- elem + } + close(ch) + set.RUnlock() + }() + + return ch +} + +func (set *threadSafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + set.RLock() + L: + for elem := range set.s { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + set.RUnlock() + }() + + return iterator +} + +func (set *threadSafeSet) Equal(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.Equal(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clone() Set { + set.RLock() + + unsafeClone := set.s.Clone().(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeClone} + set.RUnlock() + return ret +} + +func (set *threadSafeSet) String() string { + set.RLock() + ret := set.s.String() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) PowerSet() Set { + set.RLock() + unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet) + set.RUnlock() + + ret := &threadSafeSet{s: newThreadUnsafeSet()} + for subset := range unsafePowerSet.Iter() { + unsafeSubset := subset.(*threadUnsafeSet) + ret.Add(&threadSafeSet{s: *unsafeSubset}) + } + return ret +} + +func (set *threadSafeSet) Pop() interface{} { + set.Lock() 
+	defer set.Unlock()
+	return set.s.Pop()
+}
+
+func (set *threadSafeSet) CartesianProduct(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeCartProduct}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) ToSlice() []interface{} {
+	keys := make([]interface{}, 0, set.Cardinality())
+	set.RLock()
+	for elem := range set.s {
+		keys = append(keys, elem)
+	}
+	set.RUnlock()
+	return keys
+}
+
+func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
+	set.RLock()
+	b, err := set.s.MarshalJSON()
+	set.RUnlock()
+
+	return b, err
+}
+
+func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
+	// NOTE: decoding mutates the set, so take the write lock,
+	// not just a read lock.
+	set.Lock()
+	err := set.s.UnmarshalJSON(p)
+	set.Unlock()
+
+	return err
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go
new file mode 100644
index 0000000..927eb23
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go
@@ -0,0 +1,340 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type threadUnsafeSet map[interface{}]struct{}
+
+// An OrderedPair represents a 2-tuple of values.
+type OrderedPair struct {
+	First  interface{}
+	Second interface{}
+}
+
+func newThreadUnsafeSet() threadUnsafeSet {
+	return make(threadUnsafeSet)
+}
+
+// Equal says whether two 2-tuples contain the same values in the same order.
+func (pair *OrderedPair) Equal(other OrderedPair) bool { + if pair.First == other.First && + pair.Second == other.Second { + return true + } + + return false +} + +func (set *threadUnsafeSet) Add(i interface{}) bool { + _, found := (*set)[i] + if found { + return false //False if it existed already + } + + (*set)[i] = struct{}{} + return true +} + +func (set *threadUnsafeSet) Contains(i ...interface{}) bool { + for _, val := range i { + if _, ok := (*set)[val]; !ok { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSubset(other Set) bool { + _ = other.(*threadUnsafeSet) + if set.Cardinality() > other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsProperSubset(other Set) bool { + return set.IsSubset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadUnsafeSet) IsProperSuperset(other Set) bool { + return set.IsSuperset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) Union(other Set) Set { + o := other.(*threadUnsafeSet) + + unionedSet := newThreadUnsafeSet() + + for elem := range *set { + unionedSet.Add(elem) + } + for elem := range *o { + unionedSet.Add(elem) + } + return &unionedSet +} + +func (set *threadUnsafeSet) Intersect(other Set) Set { + o := other.(*threadUnsafeSet) + + intersection := newThreadUnsafeSet() + // loop over smaller set + if set.Cardinality() < other.Cardinality() { + for elem := range *set { + if other.Contains(elem) { + intersection.Add(elem) + } + } + } else { + for elem := range *o { + if set.Contains(elem) { + intersection.Add(elem) + } + } + } + return &intersection +} + +func (set *threadUnsafeSet) Difference(other Set) Set { + _ = other.(*threadUnsafeSet) + + difference := newThreadUnsafeSet() + for elem := range *set { + if !other.Contains(elem) { + difference.Add(elem) + } + } + return &difference +} + +func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { + _ = other.(*threadUnsafeSet) + + aDiff := set.Difference(other) + bDiff := other.Difference(set) + return aDiff.Union(bDiff) +} + +func (set *threadUnsafeSet) Clear() { + *set = newThreadUnsafeSet() +} + +func (set *threadUnsafeSet) Remove(i interface{}) { + delete(*set, i) +} + +func (set *threadUnsafeSet) Cardinality() int { + return len(*set) +} + +func (set *threadUnsafeSet) Each(cb func(interface{}) bool) { + for elem := range *set { + if cb(elem) { + break + } + } +} + +func (set *threadUnsafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + for elem := range *set { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (set *threadUnsafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + L: + for elem := range *set { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + }() + + return iterator +} + +func (set *threadUnsafeSet) Equal(other Set) bool { + _ = other.(*threadUnsafeSet) + + if set.Cardinality() != other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) Clone() Set { + clonedSet := newThreadUnsafeSet() + for elem := range *set { + clonedSet.Add(elem) + } + return &clonedSet +} + +func (set *threadUnsafeSet) String() string { + items := make([]string, 0, len(*set)) + + for elem := range *set { + items = append(items, 
fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) +} + +// String outputs a 2-tuple in the form "(A, B)". +func (pair OrderedPair) String() string { + return fmt.Sprintf("(%v, %v)", pair.First, pair.Second) +} + +func (set *threadUnsafeSet) Pop() interface{} { + for item := range *set { + delete(*set, item) + return item + } + return nil +} + +func (set *threadUnsafeSet) PowerSet() Set { + powSet := NewThreadUnsafeSet() + nullset := newThreadUnsafeSet() + powSet.Add(&nullset) + + for es := range *set { + u := newThreadUnsafeSet() + j := powSet.Iter() + for er := range j { + p := newThreadUnsafeSet() + if reflect.TypeOf(er).Name() == "" { + k := er.(*threadUnsafeSet) + for ek := range *(k) { + p.Add(ek) + } + } else { + p.Add(er) + } + p.Add(es) + u.Add(&p) + } + + powSet = powSet.Union(&u) + } + + return powSet +} + +func (set *threadUnsafeSet) CartesianProduct(other Set) Set { + o := other.(*threadUnsafeSet) + cartProduct := NewThreadUnsafeSet() + + for i := range *set { + for j := range *o { + elem := OrderedPair{First: i, Second: j} + cartProduct.Add(elem) + } + } + + return cartProduct +} + +func (set *threadUnsafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + for elem := range *set { + keys = append(keys, elem) + } + + return keys +} + +// MarshalJSON creates a JSON array from the set, it marshals all elements +func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) { + items := make([]string, 0, set.Cardinality()) + + for elem := range *set { + b, err := json.Marshal(elem) + if err != nil { + return nil, err + } + + items = append(items, string(b)) + } + + return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil +} + +// UnmarshalJSON recreates a set from a JSON array, it only decodes +// primitive types. Numbers are decoded as json.Number. +func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error { + var i []interface{} + + d := json.NewDecoder(bytes.NewReader(b)) + d.UseNumber() + err := d.Decode(&i) + if err != nil { + return err + } + + for _, v := range i { + switch t := v.(type) { + case []interface{}, map[string]interface{}: + continue + default: + set.Add(t) + } + } + + return nil +} diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 0000000..ae121a1 --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 0000000..91198f8 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,357 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. +package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any type (such as time.Time) that +// returns true for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()). 
The "unixmilli" and "unixnano" options will encode the number +// of milliseconds and nanoseconds, respectively, since January 1, 1970 (see +// time.UnixNano()). Including the "layout" struct tag (separate from the +// "url" tag) will use the value of the "layout" tag as a layout passed to +// time.Format. For example: +// +// // Encode a time.Time as YYYY-MM-DD +// Field time.Time `layout:"2006-01-02"` +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. Including the "del" struct tag (separate +// from the "url" tag) will use the value of the "del" tag as the delimiter. +// For example: +// +// // Encode a slice of bools as ints ("1" for true, "0" for false), +// // separated by exclamation points "!". +// Field []bool `url:",int" del:"!"` +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. 
+func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + + if name == "" { + if sf.Anonymous { + v := reflect.Indirect(sv) + if v.IsValid() && v.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, v) + continue + } + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + // if sv is a nil pointer and the custom encoder is defined on a non-pointer + // method receiver, set sv to the zero value of the underlying type + if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(encoderType) { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + // recursively dereference pointers. break on nil pointers + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del string + if opts.Contains("comma") { + del = "," + } else if opts.Contains("space") { + del = " " + } else if opts.Contains("semicolon") { + del = ";" + } else if opts.Contains("brackets") { + name = name + "[]" + } else { + del = sf.Tag.Get("del") + } + + if del != "" { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteString(del) + } + s.WriteString(valueString(sv.Index(i), opts, sf)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts, sf)) + } + } + continue + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts, sf)) + continue + } + + if sv.Kind() == reflect.Struct { + if err := reflectValue(values, sv, name); err != nil { + return err + } + continue + } + + values.Add(name, valueString(sv, opts, sf)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + if opts.Contains("unixmilli") { + return strconv.FormatInt((t.UnixNano() / 1e6), 10) + } + if opts.Contains("unixnano") { + return strconv.FormatInt(t.UnixNano(), 10) + } + if layout := sf.Tag.Get("layout"); layout != "" { + return t.Format(layout) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. 
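+//
+// A minimal illustrative sketch (editor's addition, not upstream code; the
+// Opt type is hypothetical):
+//
+//	type Opt struct {
+//		N int `url:"n,omitempty"`
+//	}
+//
+// Values(Opt{N: 0}) yields an empty url.Values, because the zero int is
+// considered empty here, while Values(Opt{N: 2}) encodes to "n=2".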
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + type zeroable interface { + IsZero() bool + } + + if z, ok := v.Interface().(zeroable); ok { + return z.IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE new file mode 100644 index 0000000..4cd54f9 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Huawei Technologies Co., Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go new file mode 100644 index 0000000..00c74cf --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go @@ -0,0 +1,320 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +package obs + +import ( + "fmt" + "net/url" + "sort" + "strings" + "time" +) + +func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, expires int64) (requestURL string, err error) { + sh := obsClient.getSecurity() + isAkSkEmpty := sh.ak == "" || sh.sk == "" + if isAkSkEmpty == false && sh.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + params[HEADER_STS_TOKEN_OBS] = sh.securityToken + } else { + params[HEADER_STS_TOKEN_AMZ] = sh.securityToken + } + } + requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, err := url.Parse(requestURL) + if err != nil { + return "", err + } + encodeHeaders(headers) + hostName := parsedRequestURL.Host + + isV4 := obsClient.conf.signature == SignatureV4 + prepareHostAndDate(headers, hostName, isV4) + + if isAkSkEmpty { + doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization") + } else { + if isV4 { + date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0]) + if parseDateErr != nil { + doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr) + return "", parseDateErr + } + delete(headers, HEADER_DATE_CAMEL) + shortDate := date.Format(SHORT_DATE_FORMAT) + longDate := date.Format(LONG_DATE_FORMAT) + if len(headers[HEADER_HOST_CAMEL]) != 0 { + index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":") + if index != -1 { + port := headers[HEADER_HOST_CAMEL][0][index+1:] + if port == "80" || port == "443" { + headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]} + } + } + + } + + signedHeaders, _headers := getSignedHeaders(headers) + + credential, scope := getCredential(sh.ak, obsClient.conf.region, shortDate) + params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX + params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + params[PARAM_DATE_AMZ_CAMEL] = longDate + params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires) + params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";") + + requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + return "", _err + } + + stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers) + signature := getSignature(stringToSign, sh.sk, obsClient.conf.region, shortDate) + + requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false)) + + } else { + originDate := headers[HEADER_DATE_CAMEL][0] + date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate) + if parseDateErr != nil { + doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr) + return "", parseDateErr + } + expires += date.Unix() + headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)} + + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs) + signature := UrlEncode(Base64Encode(HmacSha1([]byte(sh.sk), []byte(stringToSign))), false) + if strings.Index(requestURL, "?") < 0 { + requestURL += "?" 
+ } else { + requestURL += "&" + } + delete(headers, HEADER_DATE_CAMEL) + + if obsClient.conf.signature != SignatureObs { + requestURL += "AWS" + } + requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(sh.ak, false), expires, signature) + } + } + + return +} + +func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, hostName string) (requestURL string, err error) { + sh := obsClient.getSecurity() + isAkSkEmpty := sh.ak == "" || sh.sk == "" + if isAkSkEmpty == false && sh.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + headers[HEADER_STS_TOKEN_OBS] = []string{sh.securityToken} + } else { + headers[HEADER_STS_TOKEN_AMZ] = []string{sh.securityToken} + } + } + isObs := obsClient.conf.signature == SignatureObs + requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, err := url.Parse(requestURL) + if err != nil { + return "", err + } + encodeHeaders(headers) + + if hostName == "" { + hostName = parsedRequestURL.Host + } + + isV4 := obsClient.conf.signature == SignatureV4 + prepareHostAndDate(headers, hostName, isV4) + + if isAkSkEmpty { + doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization") + } else { + ak := sh.ak + sk := sh.sk + var authorization string + if isV4 { + headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD} + ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers) + authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else { + ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + hashPrefix := V2_HASH_PREFIX + if isObs { + hashPrefix = OBS_HASH_PREFIX + } + authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"]) + } + headers[HEADER_AUTH_CAMEL] = []string{authorization} + } + return +} + +func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) { + headers[HEADER_HOST_CAMEL] = []string{hostName} + if date, ok := headers[HEADER_DATE_AMZ]; ok { + flag := false + if len(date) == 1 { + if isV4 { + if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)} + flag = true + } + } else { + if strings.HasSuffix(date[0], "GMT") { + headers[HEADER_DATE_CAMEL] = []string{date[0]} + flag = true + } + } + } + if !flag { + delete(headers, HEADER_DATE_AMZ) + } + } + if _, ok := headers[HEADER_DATE_CAMEL]; !ok { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())} + } + +} + +func encodeHeaders(headers map[string][]string) { + for key, values := range headers { + for index, value := range values { + values[index] = UrlEncode(value, true) + } + headers[key] = values + } +} + +func prepareDateHeader(dataHeader, dateCamelHeader string, headers, _headers map[string][]string) { + if _, ok := _headers[HEADER_DATE_CAMEL]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } +} + +func getStringToSign(keys 
[]string, isObs bool, _headers map[string][]string) []string { + stringToSign := make([]string, 0, len(keys)) + for _, key := range keys { + var value string + prefixHeader := HEADER_PREFIX + prefixMetaHeader := HEADER_PREFIX_META + if isObs { + prefixHeader = HEADER_PREFIX_OBS + prefixMetaHeader = HEADER_PREFIX_META_OBS + } + if strings.HasPrefix(key, prefixHeader) { + if strings.HasPrefix(key, prefixMetaHeader) { + for index, v := range _headers[key] { + value += strings.TrimSpace(v) + if index != len(_headers[key])-1 { + value += "," + } + } + } else { + value = strings.Join(_headers[key], ",") + } + value = fmt.Sprintf("%s:%s", key, value) + } else { + value = strings.Join(_headers[key], ",") + } + stringToSign = append(stringToSign, value) + } + return stringToSign +} + +func attachHeaders(headers map[string][]string, isObs bool) string { + length := len(headers) + _headers := make(map[string][]string, length) + keys := make([]string, 0, length) + + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + prefixheader := HEADER_PREFIX + if isObs { + prefixheader = HEADER_PREFIX_OBS + } + if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) { + keys = append(keys, _key) + _headers[_key] = value + } + } else { + delete(headers, key) + } + } + + for _, interestedHeader := range interestedHeaders { + if _, ok := _headers[interestedHeader]; !ok { + _headers[interestedHeader] = []string{""} + keys = append(keys, interestedHeader) + } + } + dateCamelHeader := PARAM_DATE_AMZ_CAMEL + dataHeader := HEADER_DATE_AMZ + if isObs { + dateCamelHeader = PARAM_DATE_OBS_CAMEL + dataHeader = HEADER_DATE_OBS + } + prepareDateHeader(dataHeader, dateCamelHeader, headers, _headers) + + sort.Strings(keys) + stringToSign := getStringToSign(keys, isObs, _headers) + return strings.Join(stringToSign, "\n") +} + +func getScope(region, shortDate string) string { + return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX) +} + +func getCredential(ak, region, shortDate string) (string, string) { + scope := getScope(region, shortDate) + return fmt.Sprintf("%s/%s", ak, scope), scope +} + +func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) { + length := len(headers) + _headers := make(map[string][]string, length) + signedHeaders := make([]string, 0, length) + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + signedHeaders = append(signedHeaders, _key) + _headers[_key] = value + } else { + delete(headers, key) + } + } + sort.Strings(signedHeaders) + return signedHeaders, _headers +} + +func getSignature(stringToSign, sk, region, shortDate string) string { + key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate)) + key = HmacSha256(key, []byte(region)) + key = HmacSha256(key, []byte(V4_SERVICE_NAME)) + key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX)) + return Hex(HmacSha256(key, []byte(stringToSign))) +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go new file mode 100644 index 0000000..1812061 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go @@ -0,0 +1,55 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+import (
+	"strings"
+)
+
+func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
+	stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "")
+
+	var isSecurityToken bool
+	var securityToken []string
+	if isObs {
+		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]
+	} else {
+		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
+	}
+	var query []string
+	if !isSecurityToken {
+		params := strings.Split(canonicalizedURL, "?")
+		if len(params) > 1 {
+			query = strings.Split(params[1], "&")
+			for _, value := range query {
+				if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+					if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+						securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
+						isSecurityToken = true
+					}
+				}
+			}
+		}
+	}
+	logStringToSign := stringToSign
+	if isSecurityToken && len(securityToken) > 0 {
+		logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1)
+	}
+	doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
+	return stringToSign
+}
+
+func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
+	stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
+	return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))}
+}
diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go
new file mode 100644
index 0000000..70abe96
--- /dev/null
+++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go
@@ -0,0 +1,137 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+ +package obs + +import ( + "strings" + "time" +) + +func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string { + canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4) + canonicalRequest = append(canonicalRequest, method) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, canonicalizedURL) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, queryURL) + canonicalRequest = append(canonicalRequest, "\n") + + for _, signedHeader := range signedHeaders { + values, _ := headers[signedHeader] + for _, value := range values { + canonicalRequest = append(canonicalRequest, signedHeader) + canonicalRequest = append(canonicalRequest, ":") + canonicalRequest = append(canonicalRequest, value) + canonicalRequest = append(canonicalRequest, "\n") + } + } + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";")) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, payload) + + _canonicalRequest := strings.Join(canonicalRequest, "") + + var isSecurityToken bool + var securityToken []string + if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ] + } + var query []string + if !isSecurityToken { + query = strings.Split(queryURL, "&") + for _, value := range query { + if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") { + if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" { + securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]} + isSecurityToken = true + } + } + } + } + logCanonicalRequest := _canonicalRequest + if isSecurityToken && len(securityToken) > 0 { + logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1) + } + doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest) + + stringToSign := make([]string, 0, 7) + stringToSign = append(stringToSign, V4_HASH_PREFIX) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, longDate) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, scope) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest))) + + _stringToSign := strings.Join(stringToSign, "") + + doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign) + return _stringToSign +} + +// V4Auth is a wrapper for v4Auth +func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string { + return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers) +} + +func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string { + var t time.Time + if val, ok := headers[HEADER_DATE_AMZ]; ok { + var err error + t, err = time.Parse(LONG_DATE_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok { + var err error + t, err = time.Parse(LONG_DATE_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := headers[HEADER_DATE_CAMEL]; ok { + var err error + t, err = time.Parse(RFC1123_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := 
headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok { + var err error + t, err = time.Parse(RFC1123_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else { + t = time.Now().UTC() + } + shortDate := t.Format(SHORT_DATE_FORMAT) + longDate := t.Format(LONG_DATE_FORMAT) + + signedHeaders, _headers := getSignedHeaders(headers) + + credential, scope := getCredential(ak, region, shortDate) + + payload := UNSIGNED_PAYLOAD + if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok { + payload = val[0] + } + stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers) + + signature := getSignature(stringToSign, sk, region, shortDate) + + ret := make(map[string]string, 3) + ret["Credential"] = credential + ret["SignedHeaders"] = strings.Join(signedHeaders, ";") + ret["Signature"] = signature + return ret +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go new file mode 100644 index 0000000..501ab5e --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go @@ -0,0 +1,61 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "fmt" + "net/http" + "strings" +) + +// ObsClient defines OBS client. +type ObsClient struct { + conf *config + httpClient *http.Client +} + +// New creates a new ObsClient instance. +func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) { + conf := &config{endpoint: endpoint} + conf.securityProviders = make([]securityProvider, 0, 3) + conf.securityProviders = append(conf.securityProviders, NewBasicSecurityProvider(ak, sk, "")) + + conf.maxRetryCount = -1 + conf.maxRedirectCount = -1 + for _, configurer := range configurers { + configurer(conf) + } + + if err := conf.initConfigWithDefault(); err != nil { + return nil, err + } + err := conf.getTransport() + if err != nil { + return nil, err + } + + if isWarnLogEnabled() { + info := make([]string, 3) + info[0] = fmt.Sprintf("[OBS SDK Version=%s", obsSdkVersion) + info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint) + accessMode := "Virtual Hosting" + if conf.pathStyle { + accessMode = "Path" + } + info[2] = fmt.Sprintf("Access Mode=%s]", accessMode) + doLog(LEVEL_WARN, strings.Join(info, "];[")) + } + doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf) + obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}} + return obsClient, nil +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go new file mode 100644 index 0000000..f8b2468 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go @@ -0,0 +1,731 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. 
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// ListBuckets lists buckets.
+//
+// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
+func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
+	if input == nil {
+		input = &ListBucketsInput{}
+	}
+	output = &ListBucketsOutput{}
+	err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// CreateBucket creates a bucket.
+//
+// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
+func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("CreateBucketInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucket deletes a bucket.
+//
+// You can use this API to delete a bucket. The bucket to be deleted must be empty
+// (containing no objects, noncurrent object versions, or part fragments).
+func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketStoragePolicy sets bucket storage class.
+//
+// You can use this API to set the storage class for a bucket.
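+//
+// A minimal, illustrative call sketch (editor's addition, not upstream code;
+// StorageClassWarm is assumed to be one of the storage-class constants
+// defined elsewhere in this package):
+//
+//	input := &SetBucketStoragePolicyInput{}
+//	input.Bucket = "bucketname"
+//	input.StorageClass = StorageClassWarm
+//	_, err := obsClient.SetBucketStoragePolicy(input)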
+func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketStoragePolicyInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} +func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + var outputS3 *getBucketStoragePolicyOutputS3 + outputS3 = &getBucketStoragePolicyOutputS3{} + err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions) + if err != nil { + output = nil + return + } + output.BaseModel = outputS3.BaseModel + output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass) + return +} + +func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + var outputObs *getBucketStoragePolicyOutputObs + outputObs = &getBucketStoragePolicyOutputObs{} + err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions) + if err != nil { + output = nil + return + } + output.BaseModel = outputObs.BaseModel + output.StorageClass = outputObs.StorageClass + return +} + +// GetBucketStoragePolicy gets bucket storage class. +// +// You can use this API to obtain the storage class of a bucket. +func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketStoragePolicyObs(bucketName, extensions) + } + return obsClient.getBucketStoragePolicyS3(bucketName, extensions) +} + +// SetBucketQuota sets the bucket quota. +// +// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1. +func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketQuotaInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketQuota gets the bucket quota. +// +// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota. +func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) { + output = &GetBucketQuotaOutput{} + err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions) + if err != nil { + output = nil + } + return +} + +// HeadBucket checks whether a bucket exists. +// +// You can use this API to check whether a bucket exists. 
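+//
+// Usage sketch (editor's addition, not upstream code): a nil error is the
+// "bucket exists" signal; any failure detail must be read from the returned
+// error.
+//
+//	if _, err := obsClient.HeadBucket("bucketname"); err != nil {
+//		// bucket absent, or the caller lacks permission
+//	}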
+func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketMetadata gets the metadata of a bucket. +// +// You can use this API to send a HEAD request to a bucket to obtain the bucket +// metadata such as the storage class and CORS rules (if set). +func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) { + output = &GetBucketMetadataOutput{} + err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + ParseGetBucketMetadataOutput(output) + } + return +} + +// GetBucketStorageInfo gets storage information about a bucket. +// +// You can use this API to obtain storage information about a bucket, including the +// bucket size and number of objects in the bucket. +func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputS3 *getBucketLocationOutputS3 + outputS3 = &getBucketLocationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputS3.BaseModel + output.Location = outputS3.Location + } + return +} +func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputObs *getBucketLocationOutputObs + outputObs = &getBucketLocationOutputObs{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputObs.BaseModel + output.Location = outputObs.Location + } + return +} + +// GetBucketLocation gets the location of a bucket. +// +// You can use this API to obtain the bucket location. +func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) { + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketLocationObs(bucketName, extensions) + } + return obsClient.getBucketLocationS3(bucketName, extensions) +} + +// SetBucketAcl sets the bucket ACL. +// +// You can use this API to set the ACL for a bucket. 
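+//
+// Illustrative sketch (editor's addition, not upstream code; AclPrivate is
+// assumed to be one of the canned-ACL constants defined elsewhere in this
+// package):
+//
+//	input := &SetBucketAclInput{}
+//	input.Bucket = "bucketname"
+//	input.ACL = AclPrivate
+//	_, err := obsClient.SetBucketAcl(input)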
+func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketAclInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) {
+	output = &GetBucketAclOutput{}
+	var outputObs *getBucketACLOutputObs
+	outputObs = &getBucketACLOutputObs{}
+	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		output.BaseModel = outputObs.BaseModel
+		output.Owner = outputObs.Owner
+		output.Grants = make([]Grant, 0, len(outputObs.Grants))
+		for _, valGrant := range outputObs.Grants {
+			tempOutput := Grant{}
+			tempOutput.Delivered = valGrant.Delivered
+			tempOutput.Permission = valGrant.Permission
+			tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName
+			tempOutput.Grantee.ID = valGrant.Grantee.ID
+			tempOutput.Grantee.Type = valGrant.Grantee.Type
+			tempOutput.Grantee.URI = GroupAllUsers
+
+			output.Grants = append(output.Grants, tempOutput)
+		}
+	}
+	return
+}
+
+// GetBucketAcl gets the bucket ACL.
+//
+// You can use this API to obtain a bucket ACL.
+func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) {
+	output = &GetBucketAclOutput{}
+	if obsClient.conf.signature == SignatureObs {
+		return obsClient.getBucketACLObs(bucketName, extensions)
+	}
+	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketPolicy sets the bucket policy.
+//
+// You can use this API to set a bucket policy. If the bucket already has a policy, the
+// policy will be overwritten by the one specified in this request.
+func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketPolicyInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketPolicy gets the bucket policy.
+//
+// You can use this API to obtain the policy of a bucket.
+func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) {
+	output = &GetBucketPolicyOutput{}
+	err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketPolicy deletes the bucket policy.
+//
+// You can use this API to delete the policy of a bucket.
+func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketCors sets CORS rules for a bucket.
+// +// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests. +func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketCorsInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketCors gets CORS rules of a bucket. +// +// You can use this API to obtain the CORS rules of a specified bucket. +func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCors deletes CORS rules of a bucket. +// +// You can use this API to delete the CORS rules of a specified bucket. +func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioning sets the versioning status for a bucket. +// +// You can use this API to set the versioning status for a bucket. +func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketVersioningInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioning gets the versioning status of a bucket. +// +// You can use this API to obtain the versioning status of a bucket. +func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfiguration sets website hosting for a bucket. +// +// You can use this API to set website hosting for a bucket. +func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketWebsiteConfigurationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket. +// +// You can use this API to obtain the website hosting settings of a bucket. 
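+//
+// Usage sketch (editor's addition, not upstream code; RequestId is assumed to
+// come from this package's BaseModel, which the output embeds):
+//
+//	output, err := obsClient.GetBucketWebsiteConfiguration("bucketname")
+//	if err == nil {
+//		fmt.Printf("RequestId:%s\n", output.RequestId)
+//	}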
+func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
+	output = &GetBucketWebsiteConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
+//
+// You can use this API to delete the website hosting settings of a bucket.
+func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLoggingConfiguration sets the bucket logging.
+//
+// You can use this API to configure access logging for a bucket.
+func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLoggingConfiguration gets the logging settings of a bucket.
+//
+// You can use this API to obtain the access logging settings of a bucket.
+func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
+	output = &GetBucketLoggingConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
+//
+// You can use this API to set lifecycle rules for a bucket, to periodically transition
+// the storage classes of objects and delete objects in the bucket.
+func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
+//
+// You can use this API to obtain the lifecycle rules of a bucket.
+func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
+	output = &GetBucketLifecycleConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
+//
+// You can use this API to delete all lifecycle rules of a bucket.
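+//
+// Illustrative call (client as in the earlier examples):
+//
+//	if _, err := client.DeleteBucketLifecycleConfiguration("example-bucket"); err != nil {
+//		// the lifecycle rules were not removed; inspect err
+//	}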
+func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketEncryption sets the default server-side encryption for a bucket.
+//
+// You can use this API to create or update the default server-side encryption for a bucket.
+func (obsClient ObsClient) SetBucketEncryption(input *SetBucketEncryptionInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketEncryptionInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketEncryption", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketEncryption gets the encryption configuration of a bucket.
+//
+// You can use this API to obtain the encryption configuration of a bucket.
+func (obsClient ObsClient) GetBucketEncryption(bucketName string, extensions ...extensionOptions) (output *GetBucketEncryptionOutput, err error) {
+	output = &GetBucketEncryptionOutput{}
+	err = obsClient.doActionWithBucket("GetBucketEncryption", HTTP_GET, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketEncryption deletes the encryption configuration of a bucket.
+//
+// You can use this API to delete the encryption configuration of a bucket.
+func (obsClient ObsClient) DeleteBucketEncryption(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketEncryption", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketTagging sets bucket tags.
+//
+// You can use this API to set bucket tags.
+func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketTaggingInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketTagging gets bucket tags.
+//
+// You can use this API to obtain the tags of a specified bucket.
+func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) {
+	output = &GetBucketTaggingOutput{}
+	err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketTagging deletes bucket tags.
+//
+// You can use this API to delete the tags of a specified bucket.
+func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketNotification sets event notification for a bucket.
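+//
+// A sketch under assumptions (only the ID, Topic, FilterRules and Events fields of
+// TopicConfiguration appear in this patch; the event constant and the input's
+// embedded TopicConfigurations field are assumed from the public SDK surface):
+//
+//	input := &obs.SetBucketNotificationInput{}
+//	input.Bucket = "example-bucket"
+//	input.TopicConfigurations = []obs.TopicConfiguration{{
+//		ID:     "notify-created",
+//		Topic:  "urn:smn:region:account:topic", // placeholder topic URN
+//		Events: []obs.EventType{obs.ObjectCreatedAll}, // assumed constant
+//	}}
+//	_, err := client.SetBucketNotification(input)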
+// +// You can use this API to configure event notification for a bucket. You will be notified of all +// specified operations performed on the bucket. +func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketNotificationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketNotification gets event notification settings of a bucket. +// +// You can use this API to obtain the event notification configuration of a bucket. +func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) { + if obsClient.conf.signature != SignatureObs { + return obsClient.getBucketNotificationS3(bucketName, extensions) + } + output = &GetBucketNotificationOutput{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) { + outputS3 := &getBucketNotificationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions) + if err != nil { + return nil, err + } + + output = &GetBucketNotificationOutput{} + output.BaseModel = outputS3.BaseModel + topicConfigurations := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations)) + for _, topicConfigurationS3 := range outputS3.TopicConfigurations { + topicConfiguration := TopicConfiguration{} + topicConfiguration.ID = topicConfigurationS3.ID + topicConfiguration.Topic = topicConfigurationS3.Topic + topicConfiguration.FilterRules = topicConfigurationS3.FilterRules + + events := make([]EventType, 0, len(topicConfigurationS3.Events)) + for _, event := range topicConfigurationS3.Events { + events = append(events, ParseStringToEventType(event)) + } + topicConfiguration.Events = events + topicConfigurations = append(topicConfigurations, topicConfiguration) + } + output.TopicConfigurations = topicConfigurations + return +} + +// SetBucketRequestPayment sets requester-pays setting for a bucket. +func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketRequestPaymentInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketRequestPayment gets requester-pays setting of a bucket. +func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) { + output = &GetBucketRequestPaymentOutput{} + err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketFetchPolicy sets the bucket fetch policy. 
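+//
+// A sketch, assuming Status accepts a string-form value (only the Bucket, Status
+// and Agency fields are visible in this patch; both values are placeholders):
+//
+//	input := &obs.SetBucketFetchPolicyInput{}
+//	input.Bucket = "example-bucket"
+//	input.Status = "open"       // assumed status literal
+//	input.Agency = "obs-agency" // IAM agency name, placeholder
+//	_, err := client.SetBucketFetchPolicy(input)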
+// +// You can use this API to set a bucket fetch policy. +func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchPolicyInput is nil") + } + if strings.TrimSpace(string(input.Status)) == "" { + return nil, errors.New("Fetch policy status is empty") + } + if strings.TrimSpace(input.Agency) == "" { + return nil, errors.New("Fetch policy agency is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchPolicy gets the bucket fetch policy. +// +// You can use this API to obtain the fetch policy of a bucket. +func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) { + if input == nil { + return nil, errors.New("GetBucketFetchPolicyInput is nil") + } + output = &GetBucketFetchPolicyOutput{} + err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketFetchPolicy deletes the bucket fetch policy. +// +// You can use this API to delete the fetch policy of a bucket. +func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("DeleteBucketFetchPolicyInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketFetchJob sets the bucket fetch job. +// +// You can use this API to set a bucket fetch job. +func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchJobInput is nil") + } + if strings.TrimSpace(input.URL) == "" { + return nil, errors.New("URL is empty") + } + output = &SetBucketFetchJobOutput{} + err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchJob gets the bucket fetch job. +// +// You can use this API to obtain the fetch job of a bucket. 
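+//
+// Illustrative call (Bucket and JobID are the only input fields visible in this
+// patch; the job ID value is a placeholder returned by SetBucketFetchJob):
+//
+//	input := &obs.GetBucketFetchJobInput{}
+//	input.Bucket = "example-bucket"
+//	input.JobID = "job-id-from-SetBucketFetchJob"
+//	output, err := client.GetBucketFetchJob(input)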
+func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) { + if input == nil { + return nil, errors.New("GetBucketFetchJobInput is nil") + } + if strings.TrimSpace(input.JobID) == "" { + return nil, errors.New("JobID is empty") + } + output = &GetBucketFetchJobOutput{} + err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, output, extensions) + if err != nil { + output = nil + } + return +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go new file mode 100644 index 0000000..d68aec9 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go @@ -0,0 +1,408 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "errors" + "io" + "os" + "strings" +) + +// ListObjects lists objects in a bucket. +// +// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed. +func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) { + if input == nil { + return nil, errors.New("ListObjectsInput is nil") + } + output = &ListObjectsOutput{} + err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + if output.EncodingType == "url" { + err = decodeListObjectsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// ListVersions lists versioning objects in a bucket. +// +// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed. +func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) { + if input == nil { + return nil, errors.New("ListVersionsInput is nil") + } + output = &ListVersionsOutput{} + err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + if output.EncodingType == "url" { + err = decodeListVersionsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// HeadObject checks whether an object exists. +// +// You can use this API to check whether an object exists. 
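+//
+// A minimal existence check (illustrative; a non-nil err typically carries the
+// HTTP status, e.g. a 404 response when the object is absent):
+//
+//	input := &obs.HeadObjectInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "example/key.txt"
+//	if _, err := client.HeadObject(input); err != nil {
+//		// object missing or request failed; inspect err
+//	}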
+func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("HeadObjectInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetObjectMetadata sets object metadata.
+func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) {
+	if input == nil {
+		return nil, errors.New("SetObjectMetadataInput is nil")
+	}
+	output = &SetObjectMetadataOutput{}
+	err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseSetObjectMetadataOutput(output)
+	}
+	return
+}
+
+// DeleteObject deletes an object.
+//
+// You can use this API to delete an object from a specified bucket.
+func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("DeleteObjectInput is nil")
+	}
+	output = &DeleteObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseDeleteObjectOutput(output)
+	}
+	return
+}
+
+// DeleteObjects deletes objects in a batch.
+//
+// You can use this API to batch delete objects from a specified bucket.
+func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("DeleteObjectsInput is nil")
+	}
+	output = &DeleteObjectsOutput{}
+	err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else if output.EncodingType == "url" {
+		err = decodeDeleteObjectsOutput(output)
+		if err != nil {
+			doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
+			output = nil
+		}
+	}
+	return
+}
+
+// SetObjectAcl sets ACL for an object.
+//
+// You can use this API to set the ACL for an object in a specified bucket.
+func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetObjectAclInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetObjectAcl gets the ACL of an object.
+//
+// You can use this API to obtain the ACL of an object in a specified bucket.
+func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectAclInput is nil")
+	}
+	output = &GetObjectAclOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+			output.VersionId = versionID[0]
+		}
+	}
+	return
+}
+
+// RestoreObject restores an object.
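+//
+// A sketch, assuming a Days field on RestoreObjectInput (only Bucket and Key are
+// visible in this patch):
+//
+//	input := &obs.RestoreObjectInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "archived/key.bin"
+//	input.Days = 1 // assumed field: how long the restored copy stays readable
+//	_, err := client.RestoreObject(input)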
+func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("RestoreObjectInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetObjectMetadata gets object metadata.
+//
+// You can use this API to send a HEAD request to an object in a specified bucket to obtain its metadata.
+func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectMetadataInput is nil")
+	}
+	output = &GetObjectMetadataOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectMetadataOutput(output)
+	}
+	return
+}
+
+// GetObject downloads an object.
+//
+// You can use this API to download an object in a specified bucket.
+func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectInput is nil")
+	}
+	output = &GetObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectOutput(output)
+	}
+	return
+}
+
+// PutObject uploads an object to the specified bucket.
+func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("PutObjectInput is nil")
+	}
+
+	if input.ContentType == "" && input.Key != "" {
+		if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
+			input.ContentType = contentType
+		}
+	}
+	output = &PutObjectOutput{}
+	var repeatable bool
+	if input.Body != nil {
+		_, repeatable = input.Body.(*strings.Reader)
+		if input.ContentLength > 0 {
+			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
+		}
+	}
+	if repeatable {
+		err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	} else {
+		err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	}
+	if err != nil {
+		output = nil
+	} else {
+		ParsePutObjectOutput(output)
+	}
+	return
+}
+
+func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) {
+	if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
+		return contentType
+	}
+	if contentType, ok := mimeTypes[strings.ToLower(sourceFile[strings.LastIndex(sourceFile, ".")+1:])]; ok {
+		return contentType
+	}
+	return
+}
+
+func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool {
+	return input.ContentType == "" && input.Key != ""
+}
+
+// PutFile uploads a file to the specified bucket.
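+//
+// Illustrative upload from a local path (Bucket, Key and SourceFile are all used
+// by the implementation below; the path is a placeholder):
+//
+//	input := &obs.PutFileInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "uploads/report.pdf"
+//	input.SourceFile = "/tmp/report.pdf"
+//	output, err := client.PutFile(input)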
+func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) { + if input == nil { + return nil, errors.New("PutFileInput is nil") + } + + var body io.Reader + sourceFile := strings.TrimSpace(input.SourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + if input.ContentLength > 0 { + if input.ContentLength > stat.Size() { + input.ContentLength = stat.Size() + } + fileReaderWrapper.totalCount = input.ContentLength + } else { + fileReaderWrapper.totalCount = stat.Size() + } + body = fileReaderWrapper + } + + _input := &PutObjectInput{} + _input.PutObjectBasicInput = input.PutObjectBasicInput + _input.Body = body + + if obsClient.isGetContentType(_input) { + _input.ContentType = obsClient.getContentType(_input, sourceFile) + } + + output = &PutObjectOutput{} + err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObject creates a copy for an existing object. +// +// You can use this API to create a copy for an object in a specified bucket. +func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) { + if input == nil { + return nil, errors.New("CopyObjectInput is nil") + } + + if strings.TrimSpace(input.CopySourceBucket) == "" { + return nil, errors.New("Source bucket is empty") + } + if strings.TrimSpace(input.CopySourceKey) == "" { + return nil, errors.New("Source key is empty") + } + + output = &CopyObjectOutput{} + err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +func (obsClient ObsClient) AppendObject(input *AppendObjectInput, extensions ...extensionOptions) (output *AppendObjectOutput, err error) { + if input == nil { + return nil, errors.New("AppendObjectInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + output = &AppendObjectOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if input.ContentLength > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength} + } + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("AppendObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("AppendObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + if err = ParseAppendObjectOutput(output); err != nil { + output = nil + } + } + return +} + +func (obsClient ObsClient) ModifyObject(input *ModifyObjectInput, extensions ...extensionOptions) (output *ModifyObjectOutput, err error) { + if input == nil { + return nil, errors.New("ModifyObjectInput is nil") + } + + 
output = &ModifyObjectOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if input.ContentLength > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength} + } + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParseModifyObjectOutput(output) + } + return +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go new file mode 100644 index 0000000..e6e3584 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go @@ -0,0 +1,49 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "strings" +) + +// Refresh refreshes ak, sk and securityToken for obsClient. +func (obsClient ObsClient) Refresh(ak, sk, securityToken string) { + for _, sp := range obsClient.conf.securityProviders { + if bsp, ok := sp.(*BasicSecurityProvider); ok { + bsp.refresh(strings.TrimSpace(ak), strings.TrimSpace(sk), strings.TrimSpace(securityToken)) + break + } + } +} + +func (obsClient ObsClient) getSecurity() securityHolder { + if obsClient.conf.securityProviders != nil { + for _, sp := range obsClient.conf.securityProviders { + if sp == nil { + continue + } + sh := sp.getSecurity() + if sh.ak != "" && sh.sk != "" { + return sh + } + } + } + return emptySecurityHolder +} + +// Close closes ObsClient. +func (obsClient ObsClient) Close() { + obsClient.httpClient = nil + obsClient.conf.transport.CloseIdleConnections() + obsClient.conf = nil +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go new file mode 100644 index 0000000..f83a0d1 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go @@ -0,0 +1,250 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "errors" + "io" + "os" + "sort" + "strings" +) + +// ListMultipartUploads lists the multipart uploads. 
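+//
+// Illustrative call (only the Bucket field is exercised here; client as in the
+// earlier examples):
+//
+//	input := &obs.ListMultipartUploadsInput{}
+//	input.Bucket = "example-bucket"
+//	output, err := client.ListMultipartUploads(input)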
+// +// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket. +func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) { + if input == nil { + return nil, errors.New("ListMultipartUploadsInput is nil") + } + output = &ListMultipartUploadsOutput{} + err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else if output.EncodingType == "url" { + err = decodeListMultipartUploadsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err) + output = nil + } + } + return +} + +// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("AbortMultipartUploadInput is nil") + } + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUpload initializes a multipart upload. +func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("InitiateMultipartUploadInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + + output = &InitiateMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + if output.EncodingType == "url" { + err = decodeInitiateMultipartUploadOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID. +// +// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket +// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB, +// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000. 
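+//
+// A sketch of a file-backed part upload (all fields below appear in the
+// implementation; the path and sizes are placeholders, and uploadID comes from
+// InitiateMultipartUpload):
+//
+//	input := &obs.UploadPartInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "big/object.bin"
+//	input.UploadId = uploadID
+//	input.PartNumber = 1
+//	input.SourceFile = "/tmp/object.bin"
+//	input.Offset = 0
+//	input.PartSize = 5 * 1024 * 1024
+//	output, err := client.UploadPart(input)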
+func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) { + if _input == nil { + return nil, errors.New("UploadPartInput is nil") + } + + if _input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + input := &UploadPartInput{} + input.Bucket = _input.Bucket + input.Key = _input.Key + input.PartNumber = _input.PartNumber + input.UploadId = _input.UploadId + input.ContentMD5 = _input.ContentMD5 + input.SourceFile = _input.SourceFile + input.Offset = _input.Offset + input.PartSize = _input.PartSize + input.SseHeader = _input.SseHeader + input.Body = _input.Body + + output = &UploadPartOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize} + } + } else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileSize := stat.Size() + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + if input.Offset < 0 || input.Offset > fileSize { + input.Offset = 0 + } + + if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) { + input.PartSize = fileSize - input.Offset + } + fileReaderWrapper.totalCount = input.PartSize + if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil { + return nil, err + } + input.Body = fileReaderWrapper + repeatable = true + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParseUploadPartOutput(output) + output.PartNumber = input.PartNumber + } + return +} + +// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("CompleteMultipartUploadInput is nil") + } + + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + var parts partSlice = input.Parts + sort.Sort(parts) + + output = &CompleteMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCompleteMultipartUploadOutput(output) + if output.EncodingType == "url" { + err = decodeCompleteMultipartUploadOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// ListParts lists the uploaded parts in a bucket by using the multipart upload ID. 
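+//
+// Illustrative call (Bucket, Key and UploadId are required by the checks below):
+//
+//	input := &obs.ListPartsInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "big/object.bin"
+//	input.UploadId = uploadID
+//	output, err := client.ListParts(input)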
+func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListPartsInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	output = &ListPartsOutput{}
+	err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else if output.EncodingType == "url" {
+		err = decodeListPartsOutput(output)
+		if err != nil {
+			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
+			output = nil
+		}
+	}
+	return
+}
+
+// CopyPart copies a part to a specified bucket by using a specified multipart upload ID.
+//
+// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
+	if input == nil {
+		return nil, errors.New("CopyPartInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	if strings.TrimSpace(input.CopySourceBucket) == "" {
+		return nil, errors.New("Source bucket is empty")
+	}
+	if strings.TrimSpace(input.CopySourceKey) == "" {
+		return nil, errors.New("Source key is empty")
+	}
+
+	output = &CopyPartOutput{}
+	err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseCopyPartOutput(output)
+		output.PartNumber = input.PartNumber
+	}
+	return
+}
diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go
new file mode 100644
index 0000000..aef1b4c
--- /dev/null
+++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go
@@ -0,0 +1,59 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+// UploadFile performs a resumable upload.
+//
+// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
+// upload failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+	if input.EnableCheckpoint && input.CheckpointFile == "" {
+		input.CheckpointFile = input.UploadFile + ".uploadfile_record"
+	}
+
+	if input.TaskNum <= 0 {
+		input.TaskNum = 1
+	}
+	if input.PartSize < MIN_PART_SIZE {
+		input.PartSize = MIN_PART_SIZE
+	} else if input.PartSize > MAX_PART_SIZE {
+		input.PartSize = MAX_PART_SIZE
+	}
+
+	output, err = obsClient.resumeUpload(input, extensions)
+	return
+}
+
+// DownloadFile performs a resumable download.
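+//
+// A sketch (Bucket is assumed on DownloadFileInput; the remaining fields appear
+// in the implementation below, and the local path is a placeholder):
+//
+//	input := &obs.DownloadFileInput{}
+//	input.Bucket = "example-bucket"
+//	input.Key = "big/object.bin"
+//	input.DownloadFile = "/tmp/object.bin"
+//	input.EnableCheckpoint = true
+//	input.TaskNum = 4
+//	output, err := client.DownloadFile(input)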
+//
+// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
+// download failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
+	if input.DownloadFile == "" {
+		input.DownloadFile = input.Key
+	}
+
+	if input.EnableCheckpoint && input.CheckpointFile == "" {
+		input.CheckpointFile = input.DownloadFile + ".downloadfile_record"
+	}
+
+	if input.TaskNum <= 0 {
+		input.TaskNum = 1
+	}
+	if input.PartSize <= 0 {
+		input.PartSize = DEFAULT_PART_SIZE
+	}
+
+	output, err = obsClient.resumeDownload(input, extensions)
+	return
+}
diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go
new file mode 100644
index 0000000..1448d03
--- /dev/null
+++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go
@@ -0,0 +1,497 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type urlHolder struct {
+	scheme string
+	host   string
+	port   int
+}
+
+type config struct {
+	securityProviders []securityProvider
+	urlHolder         *urlHolder
+	pathStyle         bool
+	cname             bool
+	sslVerify         bool
+	endpoint          string
+	signature         SignatureType
+	region            string
+	connectTimeout    int
+	socketTimeout     int
+	headerTimeout     int
+	idleConnTimeout   int
+	finalTimeout      int
+	maxRetryCount     int
+	proxyURL          string
+	maxConnsPerHost   int
+	pemCerts          []byte
+	transport         *http.Transport
+	ctx               context.Context
+	maxRedirectCount  int
+	userAgent         string
+	enableCompression bool
+}
+
+func (conf config) String() string {
+	return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
+		"\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d"+
+		"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
+		conf.endpoint, conf.signature, conf.pathStyle, conf.region,
+		conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
+		conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
+	)
+}
+
+type configurer func(conf *config)
+
+// WithSecurityProviders is a configurer for ObsClient to set the security providers.
+func WithSecurityProviders(sps ...securityProvider) configurer {
+	return func(conf *config) {
+		for _, sp := range sps {
+			if sp != nil {
+				conf.securityProviders = append(conf.securityProviders, sp)
+			}
+		}
+	}
+}
+
+// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
+func WithSslVerify(sslVerify bool) configurer {
+	return WithSslVerifyAndPemCerts(sslVerify, nil)
+}
+
+// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
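+//
+// Configurers compose at client construction time; a hedged sketch (obs.New is
+// assumed from the public SDK surface, and pemBytes is a placeholder CA bundle):
+//
+//	client, err := obs.New("ak", "sk", "https://obs.example.com",
+//		obs.WithConnectTimeout(10),
+//		obs.WithMaxRetryCount(5),
+//		obs.WithSslVerifyAndPemCerts(true, pemBytes),
+//	)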
+func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer { + return func(conf *config) { + conf.sslVerify = sslVerify + conf.pemCerts = pemCerts + } +} + +// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of obtaining the response headers. +func WithHeaderTimeout(headerTimeout int) configurer { + return func(conf *config) { + conf.headerTimeout = headerTimeout + } +} + +// WithProxyUrl is a configurer for ObsClient to set HTTP proxy. +func WithProxyUrl(proxyURL string) configurer { + return func(conf *config) { + conf.proxyURL = proxyURL + } +} + +// WithMaxConnections is a configurer for ObsClient to set the maximum number of idle HTTP connections. +func WithMaxConnections(maxConnsPerHost int) configurer { + return func(conf *config) { + conf.maxConnsPerHost = maxConnsPerHost + } +} + +// WithPathStyle is a configurer for ObsClient. +func WithPathStyle(pathStyle bool) configurer { + return func(conf *config) { + conf.pathStyle = pathStyle + } +} + +// WithSignature is a configurer for ObsClient. +func WithSignature(signature SignatureType) configurer { + return func(conf *config) { + conf.signature = signature + } +} + +// WithRegion is a configurer for ObsClient. +func WithRegion(region string) configurer { + return func(conf *config) { + conf.region = region + } +} + +// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing +// an http/https connection, in seconds. +func WithConnectTimeout(connectTimeout int) configurer { + return func(conf *config) { + conf.connectTimeout = connectTimeout + } +} + +// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at +// the socket layer, in seconds. +func WithSocketTimeout(socketTimeout int) configurer { + return func(conf *config) { + conf.socketTimeout = socketTimeout + } +} + +// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection +// in the connection pool, in seconds. +func WithIdleConnTimeout(idleConnTimeout int) configurer { + return func(conf *config) { + conf.idleConnTimeout = idleConnTimeout + } +} + +// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an HTTP/HTTPS connection is abnormal. +func WithMaxRetryCount(maxRetryCount int) configurer { + return func(conf *config) { + conf.maxRetryCount = maxRetryCount + } +} + +// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys. +func WithSecurityToken(securityToken string) configurer { + return func(conf *config) { + for _, sp := range conf.securityProviders { + if bsp, ok := sp.(*BasicSecurityProvider); ok { + sh := bsp.getSecurity() + bsp.refresh(sh.ak, sh.sk, securityToken) + break + } + } + } +} + +// WithHttpTransport is a configurer for ObsClient to set the customized http Transport. +func WithHttpTransport(transport *http.Transport) configurer { + return func(conf *config) { + conf.transport = transport + } +} + +// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request. +func WithRequestContext(ctx context.Context) configurer { + return func(conf *config) { + conf.ctx = ctx + } +} + +// WithCustomDomainName is a configurer for ObsClient. +func WithCustomDomainName(cname bool) configurer { + return func(conf *config) { + conf.cname = cname + } +} + +// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected. 
+func WithMaxRedirectCount(maxRedirectCount int) configurer { + return func(conf *config) { + conf.maxRedirectCount = maxRedirectCount + } +} + +// WithUserAgent is a configurer for ObsClient to set the User-Agent. +func WithUserAgent(userAgent string) configurer { + return func(conf *config) { + conf.userAgent = userAgent + } +} + +// WithEnableCompression is a configurer for ObsClient to set the Transport.DisableCompression. +func WithEnableCompression(enableCompression bool) configurer { + return func(conf *config) { + conf.enableCompression = enableCompression + } +} + +func (conf *config) prepareConfig() { + if conf.connectTimeout <= 0 { + conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT + } + + if conf.socketTimeout <= 0 { + conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT + } + + conf.finalTimeout = conf.socketTimeout * 10 + + if conf.headerTimeout <= 0 { + conf.headerTimeout = DEFAULT_HEADER_TIMEOUT + } + + if conf.idleConnTimeout < 0 { + conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT + } + + if conf.maxRetryCount < 0 { + conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT + } + + if conf.maxConnsPerHost <= 0 { + conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST + } + + if conf.maxRedirectCount < 0 { + conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT + } + + if conf.pathStyle && conf.signature == SignatureObs { + conf.signature = SignatureV2 + } +} + +func (conf *config) initConfigWithDefault() error { + conf.endpoint = strings.TrimSpace(conf.endpoint) + if conf.endpoint == "" { + return errors.New("endpoint is not set") + } + + if index := strings.Index(conf.endpoint, "?"); index > 0 { + conf.endpoint = conf.endpoint[:index] + } + + for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 { + conf.endpoint = conf.endpoint[:len(conf.endpoint)-1] + } + + if conf.signature == "" { + conf.signature = DEFAULT_SIGNATURE + } + + urlHolder := &urlHolder{} + var address string + if strings.HasPrefix(conf.endpoint, "https://") { + urlHolder.scheme = "https" + address = conf.endpoint[len("https://"):] + } else if strings.HasPrefix(conf.endpoint, "http://") { + urlHolder.scheme = "http" + address = conf.endpoint[len("http://"):] + } else { + urlHolder.scheme = "https" + address = conf.endpoint + } + + addr := strings.Split(address, ":") + if len(addr) == 2 { + if port, err := strconv.Atoi(addr[1]); err == nil { + urlHolder.port = port + } + } + urlHolder.host = addr[0] + if urlHolder.port == 0 { + if urlHolder.scheme == "https" { + urlHolder.port = 443 + } else { + urlHolder.port = 80 + } + } + + if IsIP(urlHolder.host) { + conf.pathStyle = true + } + + conf.urlHolder = urlHolder + + conf.region = strings.TrimSpace(conf.region) + if conf.region == "" { + conf.region = DEFAULT_REGION + } + + conf.prepareConfig() + conf.proxyURL = strings.TrimSpace(conf.proxyURL) + return nil +} + +func (conf *config) getTransport() error { + if conf.transport == nil { + conf.transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout)) + if err != nil { + return nil, err + } + return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil + }, + MaxIdleConns: conf.maxConnsPerHost, + MaxIdleConnsPerHost: conf.maxConnsPerHost, + ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout), + IdleConnTimeout: time.Second * time.Duration(conf.idleConnTimeout), + } + + if conf.proxyURL != "" { + proxyURL, err := url.Parse(conf.proxyURL) + if err != nil { + return err + } + 
conf.transport.Proxy = http.ProxyURL(proxyURL)
+		}
+
+		tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
+		if conf.sslVerify && conf.pemCerts != nil {
+			pool := x509.NewCertPool()
+			pool.AppendCertsFromPEM(conf.pemCerts)
+			tlsConfig.RootCAs = pool
+		}
+
+		conf.transport.TLSClientConfig = tlsConfig
+		conf.transport.DisableCompression = !conf.enableCompression
+	}
+
+	return nil
+}
+
+func checkRedirectFunc(req *http.Request, via []*http.Request) error {
+	return http.ErrUseLastResponse
+}
+
+// DummyQueryEscape returns the input string unchanged.
+func DummyQueryEscape(s string) string {
+	return s
+}
+
+func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
+	urlHolder := conf.urlHolder
+	if conf.cname {
+		requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
+		if conf.signature == "v4" {
+			canonicalizedURL = "/"
+		} else {
+			canonicalizedURL = "/" + urlHolder.host + "/"
+		}
+	} else {
+		if bucketName == "" {
+			requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
+			canonicalizedURL = "/"
+		} else {
+			if conf.pathStyle {
+				requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
+				canonicalizedURL = "/" + bucketName
+			} else {
+				requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
+				if conf.signature == "v2" || conf.signature == "OBS" {
+					canonicalizedURL = "/" + bucketName + "/"
+				} else {
+					canonicalizedURL = "/"
+				}
+			}
+		}
+	}
+	return
+}
+
+func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
+	if escape {
+		tempKey := []rune(objectKey)
+		result := make([]string, 0, len(tempKey))
+		for _, value := range tempKey {
+			if string(value) == "/" {
+				result = append(result, string(value))
+			} else if string(value) == " " {
+				result = append(result, url.PathEscape(string(value)))
+			} else {
+				result = append(result, url.QueryEscape(string(value)))
+			}
+		}
+		encodeObjectKey = strings.Join(result, "")
+	} else {
+		encodeObjectKey = escapeFunc(objectKey)
+	}
+	return
+}
+
+func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
+	if escape {
+		return url.QueryEscape
+	}
+	return DummyQueryEscape
+}
+
+func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {
+	requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
+	escapeFunc := conf.prepareEscapeFunc(escape)
+
+	if objectKey != "" {
+		encodeObjectKey := conf.prepareObjectKey(escape, objectKey, escapeFunc)
+		requestURL += "/" + encodeObjectKey
+		if !strings.HasSuffix(canonicalizedURL, "/") {
+			canonicalizedURL += "/"
+		}
+		canonicalizedURL += encodeObjectKey
+	}
+
+	keys := make([]string, 0, len(params))
+	for key := range params {
+		keys = append(keys, strings.TrimSpace(key))
+	}
+	sort.Strings(keys)
+	i := 0
+
+	for index, key := range keys {
+		if index == 0 {
+			requestURL += "?"
+		} else {
+			requestURL += "&"
+		}
+		_key := url.QueryEscape(key)
+		requestURL += _key
+
+		_value := params[key]
+		if conf.signature == "v4" {
+			requestURL += "=" + url.QueryEscape(_value)
+		} else {
+			if _value != "" {
+				requestURL += "=" + url.QueryEscape(_value)
+				_value = "=" + _value
+			} else {
+				_value = ""
+			}
+			lowerKey := strings.ToLower(key)
+			_, ok := allowedResourceParameterNames[lowerKey]
+			prefixHeader := HEADER_PREFIX
+			isObs := conf.signature == SignatureObs
+			if isObs {
+				prefixHeader = HEADER_PREFIX_OBS
+			}
+			ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
+			if ok {
+				if i == 0 {
+					canonicalizedURL += "?"
+				} else {
+					canonicalizedURL += "&"
+				}
+				canonicalizedURL += getQueryURL(_key, _value)
+				i++
+			}
+		}
+	}
+	return
+}
+
+func getQueryURL(key, value string) string {
+	return key + value
+}
diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go
new file mode 100644
index 0000000..8aabd6e
--- /dev/null
+++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go
@@ -0,0 +1,969 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+ +package obs + +const ( + obsSdkVersion = "3.21.8" + USER_AGENT = "obs-sdk-go/" + obsSdkVersion + HEADER_PREFIX = "x-amz-" + HEADER_PREFIX_META = "x-amz-meta-" + HEADER_PREFIX_OBS = "x-obs-" + HEADER_PREFIX_META_OBS = "x-obs-meta-" + HEADER_DATE_AMZ = "x-amz-date" + HEADER_DATE_OBS = "x-obs-date" + HEADER_STS_TOKEN_AMZ = "x-amz-security-token" + HEADER_STS_TOKEN_OBS = "x-obs-security-token" + HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId" + PREFIX_META = "meta-" + + HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256" + HEADER_ACL_AMZ = "x-amz-acl" + HEADER_ACL_OBS = "x-obs-acl" + HEADER_ACL = "acl" + HEADER_LOCATION_AMZ = "location" + HEADER_BUCKET_LOCATION_OBS = "bucket-location" + HEADER_COPY_SOURCE = "copy-source" + HEADER_COPY_SOURCE_RANGE = "copy-source-range" + HEADER_RANGE = "Range" + HEADER_STORAGE_CLASS = "x-default-storage-class" + HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class" + HEADER_VERSION_OBS = "version" + HEADER_GRANT_READ_OBS = "grant-read" + HEADER_GRANT_WRITE_OBS = "grant-write" + HEADER_GRANT_READ_ACP_OBS = "grant-read-acp" + HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp" + HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control" + HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered" + HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered" + HEADER_REQUEST_ID = "request-id" + HEADER_BUCKET_REGION = "bucket-region" + HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin" + HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers" + HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age" + HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods" + HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers" + HEADER_EPID_HEADERS = "epid" + HEADER_VERSION_ID = "version-id" + HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id" + HEADER_DELETE_MARKER = "delete-marker" + HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location" + HEADER_METADATA_DIRECTIVE = "metadata-directive" + HEADER_EXPIRATION = "expiration" + HEADER_EXPIRES_OBS = "x-obs-expires" + HEADER_RESTORE = "restore" + HEADER_OBJECT_TYPE = "object-type" + HEADER_NEXT_APPEND_POSITION = "next-append-position" + HEADER_STORAGE_CLASS2 = "storage-class" + HEADER_CONTENT_LENGTH = "content-length" + HEADER_CONTENT_TYPE = "content-type" + HEADER_CONTENT_LANGUAGE = "content-language" + HEADER_EXPIRES = "expires" + HEADER_CACHE_CONTROL = "cache-control" + HEADER_CONTENT_DISPOSITION = "content-disposition" + HEADER_CONTENT_ENCODING = "content-encoding" + HEADER_AZ_REDUNDANCY = "az-redundancy" + HEADER_BUCKET_TYPE = "bucket-type" + headerOefMarker = "oef-marker" + + HEADER_ETAG = "etag" + HEADER_LASTMODIFIED = "last-modified" + + HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match" + HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match" + HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since" + HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since" + + HEADER_IF_MATCH = "If-Match" + HEADER_IF_NONE_MATCH = "If-None-Match" + HEADER_IF_MODIFIED_SINCE = "If-Modified-Since" + HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since" + + HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm" + HEADER_SSEC_KEY = "server-side-encryption-customer-key" + HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_ENCRYPTION = "server-side-encryption" + HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id" + HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id" + + 
HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm" + HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key" + HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id" + + HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id" + + HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect" + + headerFSFileInterface = "fs-file-interface" + + HEADER_DATE_CAMEL = "Date" + HEADER_HOST_CAMEL = "Host" + HEADER_HOST = "host" + HEADER_AUTH_CAMEL = "Authorization" + HEADER_MD5_CAMEL = "Content-MD5" + HEADER_LOCATION_CAMEL = "Location" + HEADER_CONTENT_LENGTH_CAMEL = "Content-Length" + HEADER_CONTENT_TYPE_CAML = "Content-Type" + HEADER_USER_AGENT_CAMEL = "User-Agent" + HEADER_ORIGIN_CAMEL = "Origin" + HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers" + HEADER_CACHE_CONTROL_CAMEL = "Cache-Control" + HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition" + HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding" + HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language" + HEADER_EXPIRES_CAMEL = "Expires" + + PARAM_VERSION_ID = "versionId" + PARAM_RESPONSE_CONTENT_TYPE = "response-content-type" + PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language" + PARAM_RESPONSE_EXPIRES = "response-expires" + PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control" + PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition" + PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding" + PARAM_IMAGE_PROCESS = "x-image-process" + + PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm" + PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential" + PARAM_DATE_AMZ_CAMEL = "X-Amz-Date" + PARAM_DATE_OBS_CAMEL = "X-Obs-Date" + PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires" + PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders" + PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature" + + DEFAULT_SIGNATURE = SignatureV2 + DEFAULT_REGION = "region" + DEFAULT_CONNECT_TIMEOUT = 60 + DEFAULT_SOCKET_TIMEOUT = 60 + DEFAULT_HEADER_TIMEOUT = 60 + DEFAULT_IDLE_CONN_TIMEOUT = 30 + DEFAULT_MAX_RETRY_COUNT = 3 + DEFAULT_MAX_REDIRECT_COUNT = 3 + DEFAULT_MAX_CONN_PER_HOST = 1000 + EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD" + LONG_DATE_FORMAT = "20060102T150405Z" + SHORT_DATE_FORMAT = "20060102" + ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z" + ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z" + RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT" + + V4_SERVICE_NAME = "s3" + V4_SERVICE_SUFFIX = "aws4_request" + + V2_HASH_PREFIX = "AWS" + OBS_HASH_PREFIX = "OBS" + + V4_HASH_PREFIX = "AWS4-HMAC-SHA256" + V4_HASH_PRE = "AWS4" + + DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms" + DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms" + + DEFAULT_SSE_C_ENCRYPTION = "AES256" + + HTTP_GET = "GET" + HTTP_POST = "POST" + HTTP_PUT = "PUT" + HTTP_DELETE = "DELETE" + HTTP_HEAD = "HEAD" + HTTP_OPTIONS = "OPTIONS" + + REQUEST_PAYER = "request-payer" + TRAFFIC_LIMIT = "traffic-limit" + MULTI_AZ = "3az" + + MAX_PART_SIZE = 5 * 1024 * 1024 * 1024 + MIN_PART_SIZE = 100 * 1024 + DEFAULT_PART_SIZE = 9 * 1024 * 1024 + MAX_PART_NUM = 10000 +) + +// SignatureType defines type of signature +type SignatureType string + +const ( + // SignatureV2 signature type v2 + SignatureV2 SignatureType = "v2" + // SignatureV4 signature type v4 + SignatureV4 SignatureType = "v4" + // SignatureObs signature type OBS + SignatureObs 
SignatureType = "OBS" +) + +var ( + interestedHeaders = []string{"content-md5", "content-type", "date"} + + allowedRequestHTTPHeaderMetadataNames = map[string]bool{ + "content-type": true, + "content-md5": true, + "content-length": true, + "content-language": true, + "expires": true, + "origin": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, + "access-control-request-method": true, + "access-control-request-headers": true, + "x-default-storage-class": true, + "location": true, + "date": true, + "etag": true, + "range": true, + "host": true, + "if-modified-since": true, + "if-unmodified-since": true, + "if-match": true, + "if-none-match": true, + "last-modified": true, + "content-range": true, + } + + allowedResourceParameterNames = map[string]bool{ + "acl": true, + "backtosource": true, + "metadata": true, + "policy": true, + "torrent": true, + "logging": true, + "location": true, + "storageinfo": true, + "quota": true, + "storageclass": true, + "storagepolicy": true, + "requestpayment": true, + "versions": true, + "versioning": true, + "versionid": true, + "uploads": true, + "uploadid": true, + "partnumber": true, + "website": true, + "notification": true, + "lifecycle": true, + "deletebucket": true, + "delete": true, + "cors": true, + "restore": true, + "encryption": true, + "tagging": true, + "append": true, + "modify": true, + "position": true, + "replication": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "x-image-process": true, + "x-oss-process": true, + "x-image-save-bucket": true, + "x-image-save-object": true, + "ignore-sign-in-query": true, + } + + mimeTypes = map[string]string{ + "001": "application/x-001", + "301": "application/x-301", + "323": "text/h323", + "7z": "application/x-7z-compressed", + "906": "application/x-906", + "907": "drawing/907", + "IVF": "video/x-ivf", + "a11": "application/x-a11", + "aac": "audio/x-aac", + "acp": "audio/x-mei-aac", + "ai": "application/postscript", + "aif": "audio/aiff", + "aifc": "audio/aiff", + "aiff": "audio/aiff", + "anv": "application/x-anv", + "apk": "application/vnd.android.package-archive", + "asa": "text/asa", + "asf": "video/x-ms-asf", + "asp": "text/asp", + "asx": "video/x-ms-asf", + "atom": "application/atom+xml", + "au": "audio/basic", + "avi": "video/avi", + "awf": "application/vnd.adobe.workflow", + "biz": "text/xml", + "bmp": "application/x-bmp", + "bot": "application/x-bot", + "bz2": "application/x-bzip2", + "c4t": "application/x-c4t", + "c90": "application/x-c90", + "cal": "application/x-cals", + "cat": "application/vnd.ms-pki.seccat", + "cdf": "application/x-netcdf", + "cdr": "application/x-cdr", + "cel": "application/x-cel", + "cer": "application/x-x509-ca-cert", + "cg4": "application/x-g4", + "cgm": "application/x-cgm", + "cit": "application/x-cit", + "class": "java/*", + "cml": "text/xml", + "cmp": "application/x-cmp", + "cmx": "application/x-cmx", + "cot": "application/x-cot", + "crl": "application/pkix-crl", + "crt": "application/x-x509-ca-cert", + "csi": "application/x-csi", + "css": "text/css", + "csv": "text/csv", + "cu": "application/cu-seeme", + "cut": "application/x-cut", + "dbf": "application/x-dbf", + "dbm": "application/x-dbm", + "dbx": "application/x-dbx", + "dcd": "text/xml", + "dcx": "application/x-dcx", + "deb": "application/x-debian-package", + "der": "application/x-x509-ca-cert", + "dgn": 
"application/x-dgn", + "dib": "application/x-dib", + "dll": "application/x-msdownload", + "doc": "application/msword", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "dot": "application/msword", + "drw": "application/x-drw", + "dtd": "text/xml", + "dvi": "application/x-dvi", + "dwf": "application/x-dwf", + "dwg": "application/x-dwg", + "dxb": "application/x-dxb", + "dxf": "application/x-dxf", + "edn": "application/vnd.adobe.edn", + "emf": "application/x-emf", + "eml": "message/rfc822", + "ent": "text/xml", + "eot": "application/vnd.ms-fontobject", + "epi": "application/x-epi", + "eps": "application/postscript", + "epub": "application/epub+zip", + "etd": "application/x-ebx", + "etx": "text/x-setext", + "exe": "application/x-msdownload", + "fax": "image/fax", + "fdf": "application/vnd.fdf", + "fif": "application/fractals", + "flac": "audio/flac", + "flv": "video/x-flv", + "fo": "text/xml", + "frm": "application/x-frm", + "g4": "application/x-g4", + "gbr": "application/x-gbr", + "gif": "image/gif", + "gl2": "application/x-gl2", + "gp4": "application/x-gp4", + "gz": "application/gzip", + "hgl": "application/x-hgl", + "hmr": "application/x-hmr", + "hpg": "application/x-hpgl", + "hpl": "application/x-hpl", + "hqx": "application/mac-binhex40", + "hrf": "application/x-hrf", + "hta": "application/hta", + "htc": "text/x-component", + "htm": "text/html", + "html": "text/html", + "htt": "text/webviewhtml", + "htx": "text/html", + "icb": "application/x-icb", + "ico": "application/x-ico", + "ics": "text/calendar", + "iff": "application/x-iff", + "ig4": "application/x-g4", + "igs": "application/x-igs", + "iii": "application/x-iphone", + "img": "application/x-img", + "ini": "text/plain", + "ins": "application/x-internet-signup", + "ipa": "application/vnd.iphone", + "iso": "application/x-iso9660-image", + "isp": "application/x-internet-signup", + "jar": "application/java-archive", + "java": "java/*", + "jfif": "image/jpeg", + "jpe": "image/jpeg", + "jpeg": "image/jpeg", + "jpg": "image/jpeg", + "js": "application/x-javascript", + "json": "application/json", + "jsp": "text/html", + "la1": "audio/x-liquid-file", + "lar": "application/x-laplayer-reg", + "latex": "application/x-latex", + "lavs": "audio/x-liquid-secure", + "lbm": "application/x-lbm", + "lmsff": "audio/x-la-lms", + "log": "text/plain", + "ls": "application/x-javascript", + "ltr": "application/x-ltr", + "m1v": "video/x-mpeg", + "m2v": "video/x-mpeg", + "m3u": "audio/mpegurl", + "m4a": "audio/mp4", + "m4e": "video/mpeg4", + "m4v": "video/mp4", + "mac": "application/x-mac", + "man": "application/x-troff-man", + "math": "text/xml", + "mdb": "application/msaccess", + "mfp": "application/x-shockwave-flash", + "mht": "message/rfc822", + "mhtml": "message/rfc822", + "mi": "application/x-mi", + "mid": "audio/mid", + "midi": "audio/mid", + "mil": "application/x-mil", + "mml": "text/xml", + "mnd": "audio/x-musicnet-download", + "mns": "audio/x-musicnet-stream", + "mocha": "application/x-javascript", + "mov": "video/quicktime", + "movie": "video/x-sgi-movie", + "mp1": "audio/mp1", + "mp2": "audio/mp2", + "mp2v": "video/mpeg", + "mp3": "audio/mp3", + "mp4": "video/mp4", + "mp4a": "audio/mp4", + "mp4v": "video/mp4", + "mpa": "video/x-mpg", + "mpd": "application/vnd.ms-project", + "mpe": "video/mpeg", + "mpeg": "video/mpeg", + "mpg": "video/mpeg", + "mpg4": "video/mp4", + "mpga": "audio/rn-mpeg", + "mpp": "application/vnd.ms-project", + "mps": "video/x-mpeg", + "mpt": "application/vnd.ms-project", + "mpv": "video/mpg", + 
"mpv2": "video/mpeg", + "mpw": "application/vnd.ms-project", + "mpx": "application/vnd.ms-project", + "mtx": "text/xml", + "mxp": "application/x-mmxp", + "net": "image/pnetvue", + "nrf": "application/x-nrf", + "nws": "message/rfc822", + "odc": "text/x-ms-odc", + "oga": "audio/ogg", + "ogg": "audio/ogg", + "ogv": "video/ogg", + "ogx": "application/ogg", + "out": "application/x-out", + "p10": "application/pkcs10", + "p12": "application/x-pkcs12", + "p7b": "application/x-pkcs7-certificates", + "p7c": "application/pkcs7-mime", + "p7m": "application/pkcs7-mime", + "p7r": "application/x-pkcs7-certreqresp", + "p7s": "application/pkcs7-signature", + "pbm": "image/x-portable-bitmap", + "pc5": "application/x-pc5", + "pci": "application/x-pci", + "pcl": "application/x-pcl", + "pcx": "application/x-pcx", + "pdf": "application/pdf", + "pdx": "application/vnd.adobe.pdx", + "pfx": "application/x-pkcs12", + "pgl": "application/x-pgl", + "pgm": "image/x-portable-graymap", + "pic": "application/x-pic", + "pko": "application/vnd.ms-pki.pko", + "pl": "application/x-perl", + "plg": "text/html", + "pls": "audio/scpls", + "plt": "application/x-plt", + "png": "image/png", + "pnm": "image/x-portable-anymap", + "pot": "application/vnd.ms-powerpoint", + "ppa": "application/vnd.ms-powerpoint", + "ppm": "application/x-ppm", + "pps": "application/vnd.ms-powerpoint", + "ppt": "application/vnd.ms-powerpoint", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "pr": "application/x-pr", + "prf": "application/pics-rules", + "prn": "application/x-prn", + "prt": "application/x-prt", + "ps": "application/postscript", + "ptn": "application/x-ptn", + "pwz": "application/vnd.ms-powerpoint", + "qt": "video/quicktime", + "r3t": "text/vnd.rn-realtext3d", + "ra": "audio/vnd.rn-realaudio", + "ram": "audio/x-pn-realaudio", + "rar": "application/x-rar-compressed", + "ras": "application/x-ras", + "rat": "application/rat-file", + "rdf": "text/xml", + "rec": "application/vnd.rn-recording", + "red": "application/x-red", + "rgb": "application/x-rgb", + "rjs": "application/vnd.rn-realsystem-rjs", + "rjt": "application/vnd.rn-realsystem-rjt", + "rlc": "application/x-rlc", + "rle": "application/x-rle", + "rm": "application/vnd.rn-realmedia", + "rmf": "application/vnd.adobe.rmf", + "rmi": "audio/mid", + "rmj": "application/vnd.rn-realsystem-rmj", + "rmm": "audio/x-pn-realaudio", + "rmp": "application/vnd.rn-rn_music_package", + "rms": "application/vnd.rn-realmedia-secure", + "rmvb": "application/vnd.rn-realmedia-vbr", + "rmx": "application/vnd.rn-realsystem-rmx", + "rnx": "application/vnd.rn-realplayer", + "rp": "image/vnd.rn-realpix", + "rpm": "audio/x-pn-realaudio-plugin", + "rsml": "application/vnd.rn-rsml", + "rss": "application/rss+xml", + "rt": "text/vnd.rn-realtext", + "rtf": "application/x-rtf", + "rv": "video/vnd.rn-realvideo", + "sam": "application/x-sam", + "sat": "application/x-sat", + "sdp": "application/sdp", + "sdw": "application/x-sdw", + "sgm": "text/sgml", + "sgml": "text/sgml", + "sis": "application/vnd.symbian.install", + "sisx": "application/vnd.symbian.install", + "sit": "application/x-stuffit", + "slb": "application/x-slb", + "sld": "application/x-sld", + "slk": "drawing/x-slk", + "smi": "application/smil", + "smil": "application/smil", + "smk": "application/x-smk", + "snd": "audio/basic", + "sol": "text/plain", + "sor": "text/plain", + "spc": "application/x-pkcs7-certificates", + "spl": "application/futuresplash", + "spp": "text/xml", + "ssm": "application/streamingmedia", + "sst": 
"application/vnd.ms-pki.certstore", + "stl": "application/vnd.ms-pki.stl", + "stm": "text/html", + "sty": "application/x-sty", + "svg": "image/svg+xml", + "swf": "application/x-shockwave-flash", + "tar": "application/x-tar", + "tdf": "application/x-tdf", + "tg4": "application/x-tg4", + "tga": "application/x-tga", + "tif": "image/tiff", + "tiff": "image/tiff", + "tld": "text/xml", + "top": "drawing/x-top", + "torrent": "application/x-bittorrent", + "tsd": "text/xml", + "ttf": "application/x-font-ttf", + "txt": "text/plain", + "uin": "application/x-icq", + "uls": "text/iuls", + "vcf": "text/x-vcard", + "vda": "application/x-vda", + "vdx": "application/vnd.visio", + "vml": "text/xml", + "vpg": "application/x-vpeg005", + "vsd": "application/vnd.visio", + "vss": "application/vnd.visio", + "vst": "application/x-vst", + "vsw": "application/vnd.visio", + "vsx": "application/vnd.visio", + "vtx": "application/vnd.visio", + "vxml": "text/xml", + "wav": "audio/wav", + "wax": "audio/x-ms-wax", + "wb1": "application/x-wb1", + "wb2": "application/x-wb2", + "wb3": "application/x-wb3", + "wbmp": "image/vnd.wap.wbmp", + "webm": "video/webm", + "wiz": "application/msword", + "wk3": "application/x-wk3", + "wk4": "application/x-wk4", + "wkq": "application/x-wkq", + "wks": "application/x-wks", + "wm": "video/x-ms-wm", + "wma": "audio/x-ms-wma", + "wmd": "application/x-ms-wmd", + "wmf": "application/x-wmf", + "wml": "text/vnd.wap.wml", + "wmv": "video/x-ms-wmv", + "wmx": "video/x-ms-wmx", + "wmz": "application/x-ms-wmz", + "woff": "application/x-font-woff", + "wp6": "application/x-wp6", + "wpd": "application/x-wpd", + "wpg": "application/x-wpg", + "wpl": "application/vnd.ms-wpl", + "wq1": "application/x-wq1", + "wr1": "application/x-wr1", + "wri": "application/x-wri", + "wrk": "application/x-wrk", + "ws": "application/x-ws", + "ws2": "application/x-ws", + "wsc": "text/scriptlet", + "wsdl": "text/xml", + "wvx": "video/x-ms-wvx", + "x_b": "application/x-x_b", + "x_t": "application/x-x_t", + "xap": "application/x-silverlight-app", + "xbm": "image/x-xbitmap", + "xdp": "application/vnd.adobe.xdp", + "xdr": "text/xml", + "xfd": "application/vnd.adobe.xfd", + "xfdf": "application/vnd.adobe.xfdf", + "xhtml": "text/html", + "xls": "application/vnd.ms-excel", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "xlw": "application/x-xlw", + "xml": "text/xml", + "xpl": "audio/scpls", + "xpm": "image/x-xpixmap", + "xq": "text/xml", + "xql": "text/xml", + "xquery": "text/xml", + "xsd": "text/xml", + "xsl": "text/xml", + "xslt": "text/xml", + "xwd": "application/x-xwd", + "yaml": "text/yaml", + "yml": "text/yaml", + "zip": "application/zip", + } +) + +// HttpMethodType defines http method type +type HttpMethodType string + +const ( + HttpMethodGet HttpMethodType = HTTP_GET + HttpMethodPut HttpMethodType = HTTP_PUT + HttpMethodPost HttpMethodType = HTTP_POST + HttpMethodDelete HttpMethodType = HTTP_DELETE + HttpMethodHead HttpMethodType = HTTP_HEAD + HttpMethodOptions HttpMethodType = HTTP_OPTIONS +) + +// SubResourceType defines the subResource value +type SubResourceType string + +const ( + // SubResourceStoragePolicy subResource value: storagePolicy + SubResourceStoragePolicy SubResourceType = "storagePolicy" + + // SubResourceStorageClass subResource value: storageClass + SubResourceStorageClass SubResourceType = "storageClass" + + // SubResourceQuota subResource value: quota + SubResourceQuota SubResourceType = "quota" + + // SubResourceStorageInfo subResource value: storageinfo + 
SubResourceStorageInfo SubResourceType = "storageinfo" + + // SubResourceLocation subResource value: location + SubResourceLocation SubResourceType = "location" + + // SubResourceAcl subResource value: acl + SubResourceAcl SubResourceType = "acl" + + // SubResourcePolicy subResource value: policy + SubResourcePolicy SubResourceType = "policy" + + // SubResourceCors subResource value: cors + SubResourceCors SubResourceType = "cors" + + // SubResourceVersioning subResource value: versioning + SubResourceVersioning SubResourceType = "versioning" + + // SubResourceWebsite subResource value: website + SubResourceWebsite SubResourceType = "website" + + // SubResourceLogging subResource value: logging + SubResourceLogging SubResourceType = "logging" + + // SubResourceLifecycle subResource value: lifecycle + SubResourceLifecycle SubResourceType = "lifecycle" + + // SubResourceNotification subResource value: notification + SubResourceNotification SubResourceType = "notification" + + // SubResourceEncryption subResource value: encryption + SubResourceEncryption SubResourceType = "encryption" + + // SubResourceTagging subResource value: tagging + SubResourceTagging SubResourceType = "tagging" + + // SubResourceDelete subResource value: delete + SubResourceDelete SubResourceType = "delete" + + // SubResourceVersions subResource value: versions + SubResourceVersions SubResourceType = "versions" + + // SubResourceUploads subResource value: uploads + SubResourceUploads SubResourceType = "uploads" + + // SubResourceRestore subResource value: restore + SubResourceRestore SubResourceType = "restore" + + // SubResourceMetadata subResource value: metadata + SubResourceMetadata SubResourceType = "metadata" + + // SubResourceRequestPayment subResource value: requestPayment + SubResourceRequestPayment SubResourceType = "requestPayment" + + // SubResourceAppend subResource value: append + SubResourceAppend SubResourceType = "append" + + // SubResourceModify subResource value: modify + SubResourceModify SubResourceType = "modify" +) + +// objectKeyType defines the objectKey value +type objectKeyType string + +const ( + // objectKeyExtensionPolicy objectKey value: v1/extension_policy + objectKeyExtensionPolicy objectKeyType = "v1/extension_policy" + + // objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs + objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs" +) + +// AclType defines bucket/object acl type +type AclType string + +const ( + AclPrivate AclType = "private" + AclPublicRead AclType = "public-read" + AclPublicReadWrite AclType = "public-read-write" + AclAuthenticatedRead AclType = "authenticated-read" + AclBucketOwnerRead AclType = "bucket-owner-read" + AclBucketOwnerFullControl AclType = "bucket-owner-full-control" + AclLogDeliveryWrite AclType = "log-delivery-write" + AclPublicReadDelivery AclType = "public-read-delivered" + AclPublicReadWriteDelivery AclType = "public-read-write-delivered" +) + +// StorageClassType defines bucket storage class +type StorageClassType string + +const ( + //StorageClassStandard storage class: STANDARD + StorageClassStandard StorageClassType = "STANDARD" + + //StorageClassWarm storage class: WARM + StorageClassWarm StorageClassType = "WARM" + + //StorageClassCold storage class: COLD + StorageClassCold StorageClassType = "COLD" + + storageClassStandardIA StorageClassType = "STANDARD_IA" + storageClassGlacier StorageClassType = "GLACIER" +) + +// PermissionType defines permission type +type PermissionType string + +const ( + // PermissionRead permission 
type: READ
+	PermissionRead PermissionType = "READ"
+
+	// PermissionWrite permission type: WRITE
+	PermissionWrite PermissionType = "WRITE"
+
+	// PermissionReadAcp permission type: READ_ACP
+	PermissionReadAcp PermissionType = "READ_ACP"
+
+	// PermissionWriteAcp permission type: WRITE_ACP
+	PermissionWriteAcp PermissionType = "WRITE_ACP"
+
+	// PermissionFullControl permission type: FULL_CONTROL
+	PermissionFullControl PermissionType = "FULL_CONTROL"
+)
+
+// GranteeType defines grantee type
+type GranteeType string
+
+const (
+	// GranteeGroup grantee type: Group
+	GranteeGroup GranteeType = "Group"
+
+	// GranteeUser grantee type: CanonicalUser
+	GranteeUser GranteeType = "CanonicalUser"
+)
+
+// GroupUriType defines grantee uri type
+type GroupUriType string
+
+const (
+	// GroupAllUsers grantee uri type: AllUsers
+	GroupAllUsers GroupUriType = "AllUsers"
+
+	// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
+	GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"
+
+	// GroupLogDelivery grantee uri type: LogDelivery
+	GroupLogDelivery GroupUriType = "LogDelivery"
+)
+
+// VersioningStatusType defines bucket version status
+type VersioningStatusType string
+
+const (
+	// VersioningStatusEnabled version status: Enabled
+	VersioningStatusEnabled VersioningStatusType = "Enabled"
+
+	// VersioningStatusSuspended version status: Suspended
+	VersioningStatusSuspended VersioningStatusType = "Suspended"
+)
+
+// ProtocolType defines protocol type
+type ProtocolType string
+
+const (
+	// ProtocolHttp protocol type: http
+	ProtocolHttp ProtocolType = "http"
+
+	// ProtocolHttps protocol type: https
+	ProtocolHttps ProtocolType = "https"
+)
+
+// RuleStatusType defines lifecycle rule status
+type RuleStatusType string
+
+const (
+	// RuleStatusEnabled rule status: Enabled
+	RuleStatusEnabled RuleStatusType = "Enabled"
+
+	// RuleStatusDisabled rule status: Disabled
+	RuleStatusDisabled RuleStatusType = "Disabled"
+)
+
+// RestoreTierType defines restore options
+type RestoreTierType string
+
+const (
+	// RestoreTierExpedited restore options: Expedited
+	RestoreTierExpedited RestoreTierType = "Expedited"
+
+	// RestoreTierStandard restore options: Standard
+	RestoreTierStandard RestoreTierType = "Standard"
+
+	// RestoreTierBulk restore options: Bulk
+	RestoreTierBulk RestoreTierType = "Bulk"
+)
+
+// MetadataDirectiveType defines metadata operation indicator
+type MetadataDirectiveType string
+
+const (
+	// CopyMetadata metadata operation: COPY
+	CopyMetadata MetadataDirectiveType = "COPY"
+
+	// ReplaceNew metadata operation: REPLACE_NEW
+	ReplaceNew MetadataDirectiveType = "REPLACE_NEW"
+
+	// ReplaceMetadata metadata operation: REPLACE
+	ReplaceMetadata MetadataDirectiveType = "REPLACE"
+)
+
+// EventType defines bucket notification type of events
+type EventType string
+
+const (
+	// ObjectCreatedAll type of events: ObjectCreated:*
+	ObjectCreatedAll EventType = "ObjectCreated:*"
+
+	// ObjectCreatedPut type of events: ObjectCreated:Put
+	ObjectCreatedPut EventType = "ObjectCreated:Put"
+
+	// ObjectCreatedPost type of events: ObjectCreated:Post
+	ObjectCreatedPost EventType = "ObjectCreated:Post"
+
+	// ObjectCreatedCopy type of events: ObjectCreated:Copy
+	ObjectCreatedCopy EventType = "ObjectCreated:Copy"
+
+	// ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
+	ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"
+
+	// ObjectRemovedAll type of events: ObjectRemoved:*
+	ObjectRemovedAll
EventType = "ObjectRemoved:*" + + // ObjectRemovedDelete type of events: ObjectRemoved:Delete + ObjectRemovedDelete EventType = "ObjectRemoved:Delete" + + // ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated + ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated" +) + +// PayerType defines type of payer +type PayerType string + +const ( + // BucketOwnerPayer type of payer: BucketOwner + BucketOwnerPayer PayerType = "BucketOwner" + + // RequesterPayer type of payer: Requester + RequesterPayer PayerType = "Requester" + + // Requester header for requester-Pays + Requester PayerType = "requester" +) + +// FetchPolicyStatusType defines type of fetch policy status +type FetchPolicyStatusType string + +const ( + // FetchStatusOpen type of status: open + FetchStatusOpen FetchPolicyStatusType = "open" + + // FetchStatusClosed type of status: closed + FetchStatusClosed FetchPolicyStatusType = "closed" +) + +// AvailableZoneType defines type of az redundancy +type AvailableZoneType string + +const ( + AvailableZoneMultiAz AvailableZoneType = "3az" +) + +// FSStatusType defines type of file system status +type FSStatusType string + +const ( + FSStatusEnabled FSStatusType = "Enabled" + FSStatusDisabled FSStatusType = "Disabled" +) + +// BucketType defines type of bucket +type BucketType string + +const ( + OBJECT BucketType = "OBJECT" + POSIX BucketType = "POSIX" +) diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go new file mode 100644 index 0000000..06b6762 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go @@ -0,0 +1,1114 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+
+package obs
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func cleanHeaderPrefix(header http.Header) map[string][]string {
+	responseHeaders := make(map[string][]string)
+	for key, value := range header {
+		if len(value) > 0 {
+			key = strings.ToLower(key)
+			if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) {
+				key = key[len(HEADER_PREFIX):]
+			}
+			responseHeaders[key] = value
+		}
+	}
+	return responseHeaders
+}
+
+// ParseStringToEventType converts string value to EventType value and returns it
+func ParseStringToEventType(value string) (ret EventType) {
+	switch value {
+	case "ObjectCreated:*", "s3:ObjectCreated:*":
+		ret = ObjectCreatedAll
+	case "ObjectCreated:Put", "s3:ObjectCreated:Put":
+		ret = ObjectCreatedPut
+	case "ObjectCreated:Post", "s3:ObjectCreated:Post":
+		ret = ObjectCreatedPost
+	case "ObjectCreated:Copy", "s3:ObjectCreated:Copy":
+		ret = ObjectCreatedCopy
+	case "ObjectCreated:CompleteMultipartUpload", "s3:ObjectCreated:CompleteMultipartUpload":
+		ret = ObjectCreatedCompleteMultipartUpload
+	case "ObjectRemoved:*", "s3:ObjectRemoved:*":
+		ret = ObjectRemovedAll
+	case "ObjectRemoved:Delete", "s3:ObjectRemoved:Delete":
+		ret = ObjectRemovedDelete
+	case "ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRemoved:DeleteMarkerCreated":
+		ret = ObjectRemovedDeleteMarkerCreated
+	default:
+		ret = ""
+	}
+	return
+}
+
+// ParseStringToStorageClassType converts string value to StorageClassType value and returns it
+func ParseStringToStorageClassType(value string) (ret StorageClassType) {
+	switch value {
+	case "STANDARD":
+		ret = StorageClassStandard
+	case "STANDARD_IA", "WARM":
+		ret = StorageClassWarm
+	case "GLACIER", "COLD":
+		ret = StorageClassCold
+	default:
+		ret = ""
+	}
+	return
+}
+
+func prepareGrantURI(grant Grant) string {
+	if grant.Grantee.URI == GroupAllUsers || grant.Grantee.URI == GroupAuthenticatedUsers {
+		return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/global/", grant.Grantee.URI)
+	}
+	if grant.Grantee.URI == GroupLogDelivery {
+		return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/s3/", grant.Grantee.URI)
+	}
+	return fmt.Sprintf("<URI>%s</URI>", grant.Grantee.URI)
+}
+
+func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string {
+	xml := make([]string, 0, 4)
+
+	if grant.Grantee.Type == GranteeUser {
+		if isObs {
+			xml = append(xml, "<Grant><Grantee>")
+		} else {
+			xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\">", grant.Grantee.Type))
+		}
+		if grant.Grantee.ID != "" {
+			granteeID := XmlTranscoding(grant.Grantee.ID)
+			xml = append(xml, fmt.Sprintf("<ID>%s</ID>", granteeID))
+		}
+		if !isObs && grant.Grantee.DisplayName != "" {
+			granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName)
+			xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", granteeDisplayName))
+		}
+		xml = append(xml, "</Grantee>")
+	} else {
+		if !isObs {
+			xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\">", grant.Grantee.Type))
+			xml = append(xml, prepareGrantURI(grant))
+			xml = append(xml, "</Grantee>")
+		} else if grant.Grantee.URI == GroupAllUsers {
+			xml = append(xml, "<Grant><Grantee>")
+			xml = append(xml, fmt.Sprintf("<Canned>Everyone</Canned>"))
+			xml = append(xml, "</Grantee>")
+		} else {
+			return strings.Join(xml, "")
+		}
+	}
+
+	xml = append(xml, fmt.Sprintf("<Permission>%s</Permission>", grant.Permission))
+	if isObs && isBucket {
+		xml = append(xml, fmt.Sprintf("<Delivered>%t</Delivered>", grant.Delivered))
+	}
+	xml = append(xml, fmt.Sprintf("</Grant>"))
+	return strings.Join(xml, "")
+}
+
+func hasLoggingTarget(input BucketLoggingStatus) bool {
+	if input.TargetBucket != "" || input.TargetPrefix != "" || len(input.TargetGrants) > 0 {
+		return true
+	}
+	return false
+}
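+
+// Editor's note: the sketch below is illustrative and not part of the upstream
+// file. Assuming the Grant/Grantee model types defined elsewhere in this SDK,
+// it shows how convertGrantToXML renders a canned-group grant when signing
+// S3-style (isObs=false).
+func exampleConvertGrantToXML() {
+	grant := Grant{
+		Grantee:    Grantee{Type: GranteeGroup, URI: GroupAllUsers},
+		Permission: PermissionRead,
+	}
+	// Prints:
+	// <Grant><Grantee xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AllUsers</URI></Grantee><Permission>READ</Permission></Grant>
+	fmt.Println(convertGrantToXML(grant, false, false))
+}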
+
+// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data and returns it
+func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) {
+	grantsLength := len(input.TargetGrants)
+	xml := make([]string, 0, 8+grantsLength)
+
+	xml = append(xml, "<BucketLoggingStatus>")
+	if isObs && input.Agency != "" {
+		agency := XmlTranscoding(input.Agency)
+		xml = append(xml, fmt.Sprintf("<Agency>%s</Agency>", agency))
+	}
+	if hasLoggingTarget(input) {
+		xml = append(xml, "<LoggingEnabled>")
+		if input.TargetBucket != "" {
+			xml = append(xml, fmt.Sprintf("<TargetBucket>%s</TargetBucket>", input.TargetBucket))
+		}
+		if input.TargetPrefix != "" {
+			targetPrefix := XmlTranscoding(input.TargetPrefix)
+			xml = append(xml, fmt.Sprintf("<TargetPrefix>%s</TargetPrefix>", targetPrefix))
+		}
+		if grantsLength > 0 {
+			xml = append(xml, "<TargetGrants>")
+			for _, grant := range input.TargetGrants {
+				xml = append(xml, convertGrantToXML(grant, isObs, false))
+			}
+			xml = append(xml, "</TargetGrants>")
+		}
+
+		xml = append(xml, "</LoggingEnabled>")
+	}
+	xml = append(xml, "</BucketLoggingStatus>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+// ConvertAclToXml converts AccessControlPolicy value to XML data and returns it
+func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 4+len(input.Grants))
+	ownerID := XmlTranscoding(input.Owner.ID)
+	xml = append(xml, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", ownerID))
+	if !isObs && input.Owner.DisplayName != "" {
+		ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
+		xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", ownerDisplayName))
+	}
+	if isObs && input.Delivered != "" {
+		objectDelivered := XmlTranscoding(input.Delivered)
+		xml = append(xml, fmt.Sprintf("</Owner><Delivered>%s</Delivered><AccessControlList>", objectDelivered))
+	} else {
+		xml = append(xml, "</Owner><AccessControlList>")
+	}
+	for _, grant := range input.Grants {
+		xml = append(xml, convertGrantToXML(grant, isObs, false))
+	}
+	xml = append(xml, "</AccessControlList></AccessControlPolicy>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 4+len(input.Grants))
+	ownerID := XmlTranscoding(input.Owner.ID)
+	xml = append(xml, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", ownerID))
+	if !isObs && input.Owner.DisplayName != "" {
+		ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
+		xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", ownerDisplayName))
+	}
+
+	xml = append(xml, "</Owner><AccessControlList>")
+
+	for _, grant := range input.Grants {
+		xml = append(xml, convertGrantToXML(grant, isObs, true))
+	}
+	xml = append(xml, "</AccessControlList></AccessControlPolicy>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func convertConditionToXML(condition Condition) string {
+	xml := make([]string, 0, 2)
+	if condition.KeyPrefixEquals != "" {
+		keyPrefixEquals := XmlTranscoding(condition.KeyPrefixEquals)
+		xml = append(xml, fmt.Sprintf("<KeyPrefixEquals>%s</KeyPrefixEquals>", keyPrefixEquals))
+	}
+	if condition.HttpErrorCodeReturnedEquals != "" {
+		xml = append(xml, fmt.Sprintf("<HttpErrorCodeReturnedEquals>%s</HttpErrorCodeReturnedEquals>", condition.HttpErrorCodeReturnedEquals))
+	}
+	if len(xml) > 0 {
+		return fmt.Sprintf("<Condition>%s</Condition>", strings.Join(xml, ""))
+	}
+	return ""
+}
+
+func prepareRoutingRule(input BucketWebsiteConfiguration) string {
+	xml := make([]string, 0, len(input.RoutingRules)*10)
+	for _, routingRule := range input.RoutingRules {
+		xml = append(xml, "<RoutingRule>")
+		xml = append(xml, "<Redirect>")
+		if routingRule.Redirect.Protocol != "" {
+			xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", routingRule.Redirect.Protocol))
+		}
+		if routingRule.Redirect.HostName != "" {
+			xml = append(xml, fmt.Sprintf("<HostName>%s</HostName>", routingRule.Redirect.HostName))
+		}
+		if routingRule.Redirect.ReplaceKeyPrefixWith != "" {
+			replaceKeyPrefixWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyPrefixWith)
+			xml = append(xml, fmt.Sprintf("<ReplaceKeyPrefixWith>%s</ReplaceKeyPrefixWith>", replaceKeyPrefixWith))
+		}
+
+		if routingRule.Redirect.ReplaceKeyWith != "" {
+			replaceKeyWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyWith)
+			xml = append(xml, fmt.Sprintf("<ReplaceKeyWith>%s</ReplaceKeyWith>", replaceKeyWith))
+		}
+		if routingRule.Redirect.HttpRedirectCode != "" {
+			xml = append(xml, fmt.Sprintf("<HttpRedirectCode>%s</HttpRedirectCode>", routingRule.Redirect.HttpRedirectCode))
+		}
+		xml = append(xml, "</Redirect>")
+
+		if ret := convertConditionToXML(routingRule.Condition); ret != "" {
+			xml = append(xml, ret)
+		}
+		xml = append(xml, "</RoutingRule>")
+	}
+	return strings.Join(xml, "")
+}
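+
+// Editor's note: illustrative only, not part of the upstream file. Assuming
+// the AccessControlPolicy/Owner model types from this SDK, ConvertAclToXml
+// produces the ACL document and, on request, its base64-encoded MD5 for use
+// as a Content-MD5 header.
+func exampleConvertAclToXml() {
+	acl := AccessControlPolicy{
+		Owner: Owner{ID: "example-owner-id"}, // hypothetical owner ID
+		Grants: []Grant{
+			{Grantee: Grantee{Type: GranteeGroup, URI: GroupAllUsers}, Permission: PermissionRead},
+		},
+	}
+	data, contentMd5 := ConvertAclToXml(acl, true, false)
+	fmt.Println(data, contentMd5)
+}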
+
+// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it
+func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
+	routingRuleLength := len(input.RoutingRules)
+	xml := make([]string, 0, 6+routingRuleLength*10)
+	xml = append(xml, "<WebsiteConfiguration>")
+
+	if input.RedirectAllRequestsTo.HostName != "" {
+		xml = append(xml, fmt.Sprintf("<RedirectAllRequestsTo><HostName>%s</HostName>", input.RedirectAllRequestsTo.HostName))
+		if input.RedirectAllRequestsTo.Protocol != "" {
+			xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", input.RedirectAllRequestsTo.Protocol))
+		}
+		xml = append(xml, "</RedirectAllRequestsTo>")
+	} else {
+		if input.IndexDocument.Suffix != "" {
+			indexDocumentSuffix := XmlTranscoding(input.IndexDocument.Suffix)
+			xml = append(xml, fmt.Sprintf("<IndexDocument><Suffix>%s</Suffix></IndexDocument>", indexDocumentSuffix))
+		}
+		if input.ErrorDocument.Key != "" {
+			errorDocumentKey := XmlTranscoding(input.ErrorDocument.Key)
+			xml = append(xml, fmt.Sprintf("<ErrorDocument><Key>%s</Key></ErrorDocument>", errorDocumentKey))
+		}
+		if routingRuleLength > 0 {
+			xml = append(xml, "<RoutingRules>")
+			xml = append(xml, prepareRoutingRule(input))
+			xml = append(xml, "</RoutingRules>")
+		}
+	}
+
+	xml = append(xml, "</WebsiteConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func convertTransitionsToXML(transitions []Transition, isObs bool) string {
+	if length := len(transitions); length > 0 {
+		xml := make([]string, 0, length)
+		for _, transition := range transitions {
+			var temp string
+			if transition.Days > 0 {
+				temp = fmt.Sprintf("<Days>%d</Days>", transition.Days)
+			} else if !transition.Date.IsZero() {
+				temp = fmt.Sprintf("<Date>%s</Date>", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+			}
+			if temp != "" {
+				if !isObs {
+					storageClass := string(transition.StorageClass)
+					if transition.StorageClass == StorageClassWarm {
+						storageClass = string(storageClassStandardIA)
+					} else if transition.StorageClass == StorageClassCold {
+						storageClass = string(storageClassGlacier)
+					}
+					xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, storageClass))
+				} else {
+					xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, transition.StorageClass))
+				}
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
+
+func convertExpirationToXML(expiration Expiration) string {
+	if expiration.Days > 0 {
+		return fmt.Sprintf("<Expiration><Days>%d</Days></Expiration>", expiration.Days)
+	} else if !expiration.Date.IsZero() {
+		return fmt.Sprintf("<Expiration><Date>%s</Date></Expiration>", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+	}
+	return ""
+}
+func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string {
+	if length := len(noncurrentVersionTransitions); length > 0 {
+		xml := make([]string, 0, length)
+		for _, noncurrentVersionTransition := range noncurrentVersionTransitions {
+			if noncurrentVersionTransition.NoncurrentDays > 0 {
+				storageClass := string(noncurrentVersionTransition.StorageClass)
+				if !isObs {
+					if storageClass == string(StorageClassWarm) {
+						storageClass = string(storageClassStandardIA)
+					} else if storageClass == string(StorageClassCold) {
+						storageClass = string(storageClassGlacier)
+					}
+				}
+				xml = append(xml, fmt.Sprintf("<NoncurrentVersionTransition><NoncurrentDays>%d</NoncurrentDays>"+
+					"<StorageClass>%s</StorageClass></NoncurrentVersionTransition>",
+					noncurrentVersionTransition.NoncurrentDays, storageClass))
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
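+
+// Editor's note: illustrative sketch, not part of the upstream file. With
+// isObs=false the OBS storage classes WARM/COLD are rewritten to their S3
+// equivalents STANDARD_IA/GLACIER before serialization; with isObs=true the
+// OBS names pass through unchanged.
+func exampleConvertTransitionsToXML() {
+	transitions := []Transition{{Days: 30, StorageClass: StorageClassWarm}}
+	// isObs=false: <Transition><Days>30</Days><StorageClass>STANDARD_IA</StorageClass></Transition>
+	fmt.Println(convertTransitionsToXML(transitions, false))
+	// isObs=true:  <Transition><Days>30</Days><StorageClass>WARM</StorageClass></Transition>
+	fmt.Println(convertTransitionsToXML(transitions, true))
+}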
+func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
+	if noncurrentVersionExpiration.NoncurrentDays > 0 {
+		return fmt.Sprintf("<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>", noncurrentVersionExpiration.NoncurrentDays)
+	}
+	return ""
+}
+
+// ConvertLifecyleConfigurationToXml converts BucketLifecyleConfiguration value to XML data and returns it
+func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.LifecycleRules)*9)
+	xml = append(xml, "<LifecycleConfiguration>")
+	for _, lifecyleRule := range input.LifecycleRules {
+		xml = append(xml, "<Rule>")
+		if lifecyleRule.ID != "" {
+			lifecyleRuleID := XmlTranscoding(lifecyleRule.ID)
+			xml = append(xml, fmt.Sprintf("<ID>%s</ID>", lifecyleRuleID))
+		}
+		lifecyleRulePrefix := XmlTranscoding(lifecyleRule.Prefix)
+		xml = append(xml, fmt.Sprintf("<Prefix>%s</Prefix>", lifecyleRulePrefix))
+		xml = append(xml, fmt.Sprintf("<Status>%s</Status>", lifecyleRule.Status))
+		if ret := convertTransitionsToXML(lifecyleRule.Transitions, isObs); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertExpirationToXML(lifecyleRule.Expiration); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertNoncurrentVersionTransitionsToXML(lifecyleRule.NoncurrentVersionTransitions, isObs); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertNoncurrentVersionExpirationToXML(lifecyleRule.NoncurrentVersionExpiration); ret != "" {
+			xml = append(xml, ret)
+		}
+		xml = append(xml, "</Rule>")
+	}
+	xml = append(xml, "</LifecycleConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+// ConvertEncryptionConfigurationToXml converts BucketEncryptionConfiguration value to XML data and returns it
+func ConvertEncryptionConfigurationToXml(input BucketEncryptionConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 5)
+	xml = append(xml, "<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault>")
+
+	algorithm := XmlTranscoding(input.SSEAlgorithm)
+	xml = append(xml, fmt.Sprintf("<SSEAlgorithm>%s</SSEAlgorithm>", algorithm))
+
+	if input.KMSMasterKeyID != "" {
+		kmsKeyID := XmlTranscoding(input.KMSMasterKeyID)
+		xml = append(xml, fmt.Sprintf("<KMSMasterKeyID>%s</KMSMasterKeyID>", kmsKeyID))
+	}
+	if input.ProjectID != "" {
+		projectID := XmlTranscoding(input.ProjectID)
+		xml = append(xml, fmt.Sprintf("<ProjectID>%s</ProjectID>", projectID))
+	}
+
+	xml = append(xml, "</ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string {
+	if length := len(filterRules); length > 0 {
+		xml := make([]string, 0, length*4)
+		for _, filterRule := range filterRules {
+			xml = append(xml, "<FilterRule>")
+			if filterRule.Name != "" {
+				filterRuleName := XmlTranscoding(filterRule.Name)
+				xml = append(xml, fmt.Sprintf("<Name>%s</Name>", filterRuleName))
+			}
+			if filterRule.Value != "" {
+				filterRuleValue := XmlTranscoding(filterRule.Value)
+				xml = append(xml, fmt.Sprintf("<Value>%s</Value>", filterRuleValue))
+			}
+			xml = append(xml, "</FilterRule>")
+		}
+		if !isObs {
+			return fmt.Sprintf("<Filter><S3Key>%s</S3Key></Filter>", strings.Join(xml, ""))
+		}
+		return fmt.Sprintf("<Filter><Object>%s</Object></Filter>", strings.Join(xml, ""))
+	}
+	return ""
+}
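+
+// Editor's note: illustrative sketch, not part of the upstream file, assuming
+// the BucketLifecyleConfiguration/LifecycleRule model types from this SDK. A
+// rule with an ID and an expiration serializes as
+// <LifecycleConfiguration><Rule><ID>expire-tmp</ID><Prefix></Prefix><Status>Enabled</Status><Expiration><Days>7</Days></Expiration></Rule></LifecycleConfiguration>.
+func exampleConvertLifecyleConfigurationToXml() {
+	input := BucketLifecyleConfiguration{
+		LifecycleRules: []LifecycleRule{
+			{ID: "expire-tmp", Status: RuleStatusEnabled, Expiration: Expiration{Days: 7}},
+		},
+	}
+	data, contentMd5 := ConvertLifecyleConfigurationToXml(input, true, false)
+	fmt.Println(data, contentMd5)
+}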
+
+func converntEventsToXML(events []EventType, isObs bool) string {
+	if length := len(events); length > 0 {
+		xml := make([]string, 0, length)
+		if !isObs {
+			for _, event := range events {
+				xml = append(xml, fmt.Sprintf("<Event>%s%s</Event>", "s3:", event))
+			}
+		} else {
+			for _, event := range events {
+				xml = append(xml, fmt.Sprintf("<Event>%s</Event>", event))
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
+
+func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string {
+	xml := make([]string, 0, 6)
+	xml = append(xml, xmlElem)
+	if topicConfiguration.ID != "" {
+		topicConfigurationID := XmlTranscoding(topicConfiguration.ID)
+		xml = append(xml, fmt.Sprintf("<Id>%s</Id>", topicConfigurationID))
+	}
+	topicConfigurationTopic := XmlTranscoding(topicConfiguration.Topic)
+	xml = append(xml, fmt.Sprintf("<Topic>%s</Topic>", topicConfigurationTopic))
+
+	if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" {
+		xml = append(xml, ret)
+	}
+	if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" {
+		xml = append(xml, ret)
+	}
+	tempElem := xmlElem[0:1] + "/" + xmlElem[1:]
+	xml = append(xml, tempElem)
+	return strings.Join(xml, "")
+}
+
+// ConverntObsRestoreToXml converts RestoreObjectInput value to XML data and returns it
+func ConverntObsRestoreToXml(restoreObjectInput RestoreObjectInput) string {
+	xml := make([]string, 0, 2)
+	xml = append(xml, fmt.Sprintf("<RestoreRequest><Days>%d</Days>", restoreObjectInput.Days))
+	if restoreObjectInput.Tier != "Bulk" {
+		xml = append(xml, fmt.Sprintf("<RestoreJob><Tier>%s</Tier></RestoreJob>", restoreObjectInput.Tier))
+	}
+	xml = append(xml, fmt.Sprintf("</RestoreRequest>"))
+	data := strings.Join(xml, "")
+	return data
+}
+
+// ConvertNotificationToXml converts BucketNotification value to XML data and returns it
+func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.TopicConfigurations)*6)
+	xml = append(xml, "<NotificationConfiguration>")
+	for _, topicConfiguration := range input.TopicConfigurations {
+		ret := converntConfigureToXML(topicConfiguration, "<TopicConfiguration>", isObs)
+		xml = append(xml, ret)
+	}
+	xml = append(xml, "</NotificationConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it
+func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.Parts)*4)
+	xml = append(xml, "<CompleteMultipartUpload>")
+	for _, part := range input.Parts {
+		xml = append(xml, "<Part>")
+		xml = append(xml, fmt.Sprintf("<PartNumber>%d</PartNumber>", part.PartNumber))
+		xml = append(xml, fmt.Sprintf("<ETag>%s</ETag>", part.ETag))
+		xml = append(xml, "</Part>")
+	}
+	xml = append(xml, "</CompleteMultipartUpload>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func convertDeleteObjectsToXML(input DeleteObjectsInput) (data string, md5 string) {
+	xml := make([]string, 0, 4+len(input.Objects)*4)
+	xml = append(xml, "<Delete>")
+	if input.Quiet {
+		xml = append(xml, fmt.Sprintf("<Quiet>%t</Quiet>", input.Quiet))
+	}
+	if input.EncodingType != "" {
+		encodingType := XmlTranscoding(input.EncodingType)
+		xml = append(xml, fmt.Sprintf("<EncodingType>%s</EncodingType>", encodingType))
+	}
+	for _, obj := range input.Objects {
+		xml = append(xml, "<Object>")
+		key := XmlTranscoding(obj.Key)
+		xml = append(xml, fmt.Sprintf("<Key>%s</Key>", key))
+		if obj.VersionId != "" {
+			xml = append(xml, fmt.Sprintf("<VersionId>%s</VersionId>", obj.VersionId))
+		}
+		xml = append(xml, "</Object>")
+	}
+	xml = append(xml, "</Delete>")
+	data = strings.Join(xml, "")
+	md5 = Base64Md5([]byte(data))
+	return
+}
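+
+// Editor's note: illustrative sketch, not part of the upstream file. Completing
+// a multipart upload posts a body listing each part number with the ETag
+// returned by the corresponding UploadPart call; assuming this SDK's
+// CompleteMultipartUploadInput/Part types, the body is built like this.
+func exampleConvertCompleteMultipartUploadInputToXml() {
+	input := CompleteMultipartUploadInput{
+		Parts: []Part{
+			{PartNumber: 1, ETag: "etag-of-part-1"}, // hypothetical ETag values
+			{PartNumber: 2, ETag: "etag-of-part-2"},
+		},
+	}
+	// <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>etag-of-part-1</ETag></Part>...</CompleteMultipartUpload>
+	data, contentMd5 := ConvertCompleteMultipartUploadInputToXml(input, true)
+	fmt.Println(data, contentMd5)
+}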
append(xml, "") + data = strings.Join(xml, "") + md5 = Base64Md5([]byte(data)) + return +} + +func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) { + if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok { + sseCHeader := SseCHeader{Encryption: ret[0]} + if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok { + sseCHeader.KeyMD5 = ret[0] + } + sseHeader = sseCHeader + } else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok { + sseKmsHeader := SseKmsHeader{Encryption: ret[0]} + if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok { + sseKmsHeader.Key = ret[0] + } else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok { + sseKmsHeader.Key = ret[0] + } + sseHeader = sseKmsHeader + } + return +} + +func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) { + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok { + AllowOrigin = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok { + AllowHeader = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok { + MaxAgeSeconds = StringToInt(ret[0], 0) + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok { + AllowMethod = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok { + ExposeHeader = ret[0] + } + return +} + +func parseUnCommonHeader(output *GetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok { + output.Expiration = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok { + output.Restore = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_OBJECT_TYPE]; ok { + output.ObjectType = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok { + output.NextAppendPosition = ret[0] + } +} + +// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers +func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + parseUnCommonHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } + + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok { + ret, err := time.Parse(time.RFC1123, ret[0]) + if err == nil { + output.LastModified = ret + } + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok { + output.ContentLength = StringToInt64(ret[0], 0) + } + + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } + +} + +// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers +func 
ParseCopyObjectOutput(output *CopyObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok { + output.CopySourceVersionId = ret[0] + } +} + +// ParsePutObjectOutput sets PutObjectOutput field values with response headers +func ParsePutObjectOutput(output *PutObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers +func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseUploadPartOutput sets UploadPartOutput field values with response headers +func ParseUploadPartOutput(output *UploadPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers +func ParseCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } +} + +// ParseCopyPartOutput sets CopyPartOutput field values with response headers +func ParseCopyPartOutput(output *CopyPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers +func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok { + output.Version = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = ret[0] + } else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok { + output.Location = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok { + output.Epid = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_AZ_REDUNDANCY]; ok { + output.AZRedundancy = ret[0] + } + if ret, ok := output.ResponseHeaders[headerFSFileInterface]; ok { + output.FSStatus = parseStringToFSStatusType(ret[0]) + } else { + output.FSStatus = FSStatusDisabled + } +} + +func parseContentHeader(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok { + output.ContentDisposition = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok { + output.ContentEncoding = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok { + 
output.ContentLanguage = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } +} + +// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers +func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok { + output.MetadataDirective = MetadataDirectiveType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok { + output.CacheControl = ret[0] + } + parseContentHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok { + output.Expires = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } +} + +// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers +func ParseDeleteObjectOutput(output *DeleteObjectOutput) { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + + if deleteMarker, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok { + output.DeleteMarker = deleteMarker[0] == "true" + } +} + +// ParseGetObjectOutput sets GetObjectOutput field values with response headers +func ParseGetObjectOutput(output *GetObjectOutput) { + ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput) + if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok { + output.DeleteMarker = ret[0] == "true" + } + if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok { + output.CacheControl = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok { + output.ContentDisposition = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok { + output.ContentEncoding = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok { + output.ContentLanguage = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok { + output.Expires = ret[0] + } +} + +// ConvertRequestToIoReaderV2 converts req to XML data +func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) { + data, err := TransToXml(req) + if err == nil { + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data)) + } + return bytes.NewReader(data), Base64Md5(data), nil + } + return nil, "", err +} + +// ConvertRequestToIoReader converts req to XML data +func ConvertRequestToIoReader(req interface{}) (io.Reader, error) { + body, err := TransToXml(req) + if err == nil { + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body)) + } + return bytes.NewReader(body), nil + } + return nil, err +} + +func parseBucketPolicyOutput(s reflect.Type, baseModel IBaseModel, body []byte) { + for i := 0; i < s.NumField(); i++ { + if s.Field(i).Tag == "json:\"body\"" { + reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body)) + break + } + 
} +} + +// ParseResponseToBaseModel gets response from OBS +func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) { + readCloser, ok := baseModel.(IReadCloser) + if !ok { + defer func() { + errMsg := resp.Body.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close response body") + } + }() + var body []byte + body, err = ioutil.ReadAll(resp.Body) + if err == nil && len(body) > 0 { + if xmlResult { + err = ParseXml(body, baseModel) + } else { + s := reflect.TypeOf(baseModel).Elem() + if reflect.TypeOf(baseModel).Elem().Name() == "GetBucketPolicyOutput" { + parseBucketPolicyOutput(s, baseModel, body) + } else { + err = parseJSON(body, baseModel) + } + } + if err != nil { + doLog(LEVEL_ERROR, "Unmarshal error: %v", err) + } + } + } else { + readCloser.setReadCloser(resp.Body) + } + + baseModel.setStatusCode(resp.StatusCode) + responseHeaders := cleanHeaderPrefix(resp.Header) + baseModel.setResponseHeaders(responseHeaders) + if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok { + baseModel.setRequestID(values[0]) + } + return +} + +// ParseResponseToObsError gets obsError from OBS +func ParseResponseToObsError(resp *http.Response, isObs bool) error { + isJson := false + if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok { + jsonType, _ := mimeTypes["json"] + isJson = contentType[0] == jsonType + } + obsError := ObsError{} + respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs) + if respError != nil { + doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError) + } + obsError.Status = resp.Status + return obsError +} + +// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format +func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) { + fetch := map[string]SetBucketFetchPolicyInput{"fetch": input} + json, err := json.Marshal(fetch) + if err != nil { + return "", err + } + data = string(json) + return +} + +// convertFetchJobToJSON converts SetBucketFetchJobInput into json format +func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) { + objectHeaders := make(map[string]string) + for key, value := range input.ObjectHeaders { + if value != "" { + _key := strings.ToLower(key) + if !strings.HasPrefix(key, HEADER_PREFIX_OBS) { + _key = HEADER_PREFIX_META_OBS + _key + } + objectHeaders[_key] = value + } + } + input.ObjectHeaders = objectHeaders + json, err := json.Marshal(input) + if err != nil { + return "", err + } + data = string(json) + return +} + +func parseStringToFSStatusType(value string) (ret FSStatusType) { + switch value { + case "Enabled": + ret = FSStatusEnabled + case "Disabled": + ret = FSStatusDisabled + default: + ret = "" + } + return +} + +func decodeListObjectsOutput(output *ListObjectsOutput) (err error) { + output.Delimiter, err = url.QueryUnescape(output.Delimiter) + if err != nil { + return + } + output.Marker, err = url.QueryUnescape(output.Marker) + if err != nil { + return + } + output.NextMarker, err = url.QueryUnescape(output.NextMarker) + if err != nil { + return + } + output.Prefix, err = url.QueryUnescape(output.Prefix) + if err != nil { + return + } + for index, value := range output.CommonPrefixes { + output.CommonPrefixes[index], err = url.QueryUnescape(value) + if err != nil { + return + } + } + for index, content := range output.Contents { + output.Contents[index].Key, err = url.QueryUnescape(content.Key) + if err != nil { + return + } + } + return +} + +func 
decodeListVersionsOutput(output *ListVersionsOutput) (err error) { + output.Delimiter, err = url.QueryUnescape(output.Delimiter) + if err != nil { + return + } + output.KeyMarker, err = url.QueryUnescape(output.KeyMarker) + if err != nil { + return + } + output.NextKeyMarker, err = url.QueryUnescape(output.NextKeyMarker) + if err != nil { + return + } + output.Prefix, err = url.QueryUnescape(output.Prefix) + if err != nil { + return + } + for index, version := range output.Versions { + output.Versions[index].Key, err = url.QueryUnescape(version.Key) + if err != nil { + return + } + } + for index, deleteMarker := range output.DeleteMarkers { + output.DeleteMarkers[index].Key, err = url.QueryUnescape(deleteMarker.Key) + if err != nil { + return + } + } + for index, value := range output.CommonPrefixes { + output.CommonPrefixes[index], err = url.QueryUnescape(value) + if err != nil { + return + } + } + return +} + +func decodeDeleteObjectsOutput(output *DeleteObjectsOutput) (err error) { + for index, object := range output.Deleteds { + output.Deleteds[index].Key, err = url.QueryUnescape(object.Key) + if err != nil { + return + } + } + for index, object := range output.Errors { + output.Errors[index].Key, err = url.QueryUnescape(object.Key) + if err != nil { + return + } + } + return +} + +func decodeListMultipartUploadsOutput(output *ListMultipartUploadsOutput) (err error) { + output.Delimiter, err = url.QueryUnescape(output.Delimiter) + if err != nil { + return + } + output.Prefix, err = url.QueryUnescape(output.Prefix) + if err != nil { + return + } + output.KeyMarker, err = url.QueryUnescape(output.KeyMarker) + if err != nil { + return + } + output.NextKeyMarker, err = url.QueryUnescape(output.NextKeyMarker) + if err != nil { + return + } + for index, value := range output.CommonPrefixes { + output.CommonPrefixes[index], err = url.QueryUnescape(value) + if err != nil { + return + } + } + for index, upload := range output.Uploads { + output.Uploads[index].Key, err = url.QueryUnescape(upload.Key) + if err != nil { + return + } + } + return +} + +func decodeListPartsOutput(output *ListPartsOutput) (err error) { + output.Key, err = url.QueryUnescape(output.Key) + return +} + +func decodeInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) (err error) { + output.Key, err = url.QueryUnescape(output.Key) + return +} + +func decodeCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) (err error) { + output.Key, err = url.QueryUnescape(output.Key) + return +} + +// ParseAppendObjectOutput sets AppendObjectOutput field values with response headers +func ParseAppendObjectOutput(output *AppendObjectOutput) (err error) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok { + output.NextAppendPosition, err = strconv.ParseInt(ret[0], 10, 64) + if err != nil { + err = fmt.Errorf("failed to parse next append position with error [%v]", err) + } + } + return +} + +// ParseModifyObjectOutput sets ModifyObjectOutput field values with response headers +func ParseModifyObjectOutput(output *ModifyObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go 
b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go new file mode 100644 index 0000000..eb8ab28 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go @@ -0,0 +1,34 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "encoding/xml" + "fmt" +) + +// ObsError defines error response from OBS +type ObsError struct { + BaseModel + Status string + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code" json:"code"` + Message string `xml:"Message" json:"message"` + Resource string `xml:"Resource"` + HostId string `xml:"HostId"` +} + +func (err ObsError) Error() string { + return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s", + err.Status, err.Code, err.Message, err.RequestId) +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go new file mode 100644 index 0000000..7fa8116 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go @@ -0,0 +1,41 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "fmt" + "strconv" + "strings" +) + +type extensionOptions interface{} +type extensionHeaders func(headers map[string][]string, isObs bool) error + +func setHeaderPrefix(key string, value string) extensionHeaders { + return func(headers map[string][]string, isObs bool) error { + if strings.TrimSpace(value) == "" { + return fmt.Errorf("set header %s with empty value", key) + } + setHeaders(headers, key, []string{value}, isObs) + return nil + } +} + +// WithReqPaymentHeader sets header for requester-pays +func WithReqPaymentHeader(requester PayerType) extensionHeaders { + return setHeaderPrefix(REQUEST_PAYER, string(requester)) +} + +func WithTrafficLimitHeader(trafficLimit int64) extensionHeaders { + return setHeaderPrefix(TRAFFIC_LIMIT, strconv.FormatInt(trafficLimit, 10)) +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go new file mode 100644 index 0000000..4464c72 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go @@ -0,0 +1,637 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. 
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" +) + +func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string { + _headers := make(map[string][]string, len(headers)) + if headers != nil { + for key, value := range headers { + key = strings.TrimSpace(key) + if key == "" { + continue + } + _key := strings.ToLower(key) + if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) && !strings.HasPrefix(key, HEADER_PREFIX_OBS) { + if !meta { + continue + } + if !isObs { + _key = HEADER_PREFIX_META + _key + } else { + _key = HEADER_PREFIX_META_OBS + _key + } + } else { + _key = key + } + _headers[_key] = value + } + } + return _headers +} + +func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient.doAction(action, method, "", "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + if strings.TrimSpace(objectKey) == "" { + return errors.New("Key is empty") + } + return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions) +} + +func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, 
objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error {
+	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+		return errors.New("Bucket is empty")
+	}
+	if strings.TrimSpace(objectKey) == "" {
+		return errors.New("Key is empty")
+	}
+	return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions)
+}
+
+func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error {
+
+	var resp *http.Response
+	var respError error
+	doLog(LEVEL_INFO, "Enter method %s...", action)
+	start := GetCurrentTimestamp()
+
+	params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs)
+	if err != nil {
+		return err
+	}
+
+	if params == nil {
+		params = make(map[string]string)
+	}
+
+	if headers == nil {
+		headers = make(map[string][]string)
+	}
+
+	for _, extension := range extensions {
+		if extensionHeader, ok := extension.(extensionHeaders); ok {
+			_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
+			if _err != nil {
+				doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
+			}
+		} else {
+			doLog(LEVEL_INFO, "Unsupported extensionOptions")
+		}
+	}
+
+	switch method {
+	case HTTP_GET:
+		resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_POST:
+		resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_PUT:
+		resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_DELETE:
+		resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_HEAD:
+		resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_OPTIONS:
+		resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable)
+	default:
+		respError = errors.New("Unexpected http method error")
+	}
+	if respError == nil && output != nil {
+		respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
+		if respError != nil {
+			doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+		}
+	} else {
+		doLog(LEVEL_WARN, "Do http request with error: %v", respError)
+	}
+
+	if isDebugLogEnabled() {
+		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+	}
+
+	return respError
+}
+
+func (obsClient ObsClient) doHTTPGet(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func prepareAgentHeader(clientUserAgent string) string {
+	userAgent := USER_AGENT
+	if clientUserAgent != "" {
+		userAgent = clientUserAgent
+	}
+	return userAgent
+}
+
+func (obsClient ObsClient) getSignedURLResponse(action string, output IBaseModel, xmlResult bool, resp *http.Response, err error, start int64) (respError error) {
+	var msg interface{}
+	if err != nil {
+		respError = err
+		resp = nil
+	} else {
+		doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+		if resp.StatusCode >= 300 {
+			respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+			msg = resp.Status
+			resp = nil
+		} else {
+			if output != nil {
+				respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
+			}
+			if respError != nil {
+				doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+			}
+		}
+	}
+
+	if msg != nil {
+		doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
+	}
+
+	if isDebugLogEnabled() {
+		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+	}
+	return
+}
+
+func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
+	req, err := http.NewRequest(method, signedURL, data)
+	if err != nil {
+		return err
+	}
+	if obsClient.conf.ctx != nil {
+		req = req.WithContext(obsClient.conf.ctx)
+	}
+	var resp *http.Response
+
+	var isSecurityToken bool
+	var securityToken string
+	var query []string
+	params := strings.Split(signedURL, "?")
+	if len(params) > 1 {
+		query = strings.Split(params[1], "&")
+		for _, value := range query {
+			// Slice with the length of the prefix that actually matched; using the
+			// x-amz prefix length for an obs-style token would extract the wrong
+			// substring (or panic on a short value) and break the masking below.
+			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") {
+				if token := value[len(HEADER_STS_TOKEN_AMZ)+1:]; token != "" {
+					securityToken = token
+					isSecurityToken = true
+				}
+			} else if strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+				if token := value[len(HEADER_STS_TOKEN_OBS)+1:]; token != "" {
+					securityToken = token
+					isSecurityToken = true
+				}
+			}
+		}
+	}
+	logSignedURL := signedURL
+	if isSecurityToken {
+		logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
+	}
+	doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)
+
+	req.Header = actualSignedRequestHeaders
+	if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
+		req.Host = value[0]
+		delete(req.Header,
HEADER_HOST_CAMEL) + } else if value, ok := req.Header[HEADER_HOST]; ok { + req.Host = value[0] + delete(req.Header, HEADER_HOST) + } + + if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok { + req.ContentLength = StringToInt64(value[0], -1) + delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL) + } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok { + req.ContentLength = StringToInt64(value[0], -1) + delete(req.Header, HEADER_CONTENT_LENGTH) + } + + userAgent := prepareAgentHeader(obsClient.conf.userAgent) + req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent} + start := GetCurrentTimestamp() + resp, err = obsClient.httpClient.Do(req) + if isInfoLogEnabled() { + doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start)) + } + + respError = obsClient.getSignedURLResponse(action, output, xmlResult, resp, err, start) + + return +} + +func prepareData(headers map[string][]string, data interface{}) (io.Reader, error) { + var _data io.Reader + if data != nil { + if dataStr, ok := data.(string); ok { + doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr) + headers["Content-Length"] = []string{IntToString(len(dataStr))} + _data = strings.NewReader(dataStr) + } else if dataByte, ok := data.([]byte); ok { + doLog(LEVEL_DEBUG, "Do http request with byte array") + headers["Content-Length"] = []string{IntToString(len(dataByte))} + _data = bytes.NewReader(dataByte) + } else if dataReader, ok := data.(io.Reader); ok { + _data = dataReader + } else { + doLog(LEVEL_WARN, "Data is not a valid io.Reader") + return nil, errors.New("Data is not a valid io.Reader") + } + } + return _data, nil +} + +func (obsClient ObsClient) getRequest(redirectURL, requestURL string, redirectFlag bool, _data io.Reader, method, + bucketName, objectKey string, params map[string]string, headers map[string][]string) (*http.Request, error) { + if redirectURL != "" { + if !redirectFlag { + parsedRedirectURL, err := url.Parse(redirectURL) + if err != nil { + return nil, err + } + requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host) + if err != nil { + return nil, err + } + if parsedRequestURL, err := url.Parse(requestURL); err != nil { + return nil, err + } else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" { + redirectURL += "?" 
+ parsedRequestURL.RawQuery + } + } + requestURL = redirectURL + } else { + var err error + requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "") + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, requestURL, _data) + if obsClient.conf.ctx != nil { + req = req.WithContext(obsClient.conf.ctx) + } + if err != nil { + return nil, err + } + doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method) + return req, nil +} + +func logHeaders(headers map[string][]string, signature SignatureType) { + if isDebugLogEnabled() { + auth := headers[HEADER_AUTH_CAMEL] + delete(headers, HEADER_AUTH_CAMEL) + + var isSecurityToken bool + var securityToken []string + if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken { + headers[HEADER_STS_TOKEN_AMZ] = []string{"******"} + } else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken { + headers[HEADER_STS_TOKEN_OBS] = []string{"******"} + } + doLog(LEVEL_DEBUG, "Request headers: %v", headers) + headers[HEADER_AUTH_CAMEL] = auth + if isSecurityToken { + if signature == SignatureObs { + headers[HEADER_STS_TOKEN_OBS] = securityToken + } else { + headers[HEADER_STS_TOKEN_AMZ] = securityToken + } + } + } +} + +func prepareReq(headers map[string][]string, req, lastRequest *http.Request, clientUserAgent string) *http.Request { + for key, value := range headers { + if key == HEADER_HOST_CAMEL { + req.Host = value[0] + delete(headers, key) + } else if key == HEADER_CONTENT_LENGTH_CAMEL { + req.ContentLength = StringToInt64(value[0], -1) + delete(headers, key) + } else { + req.Header[key] = value + } + } + + lastRequest = req + + userAgent := prepareAgentHeader(clientUserAgent) + req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent} + + if lastRequest != nil { + req.Host = lastRequest.Host + req.ContentLength = lastRequest.ContentLength + } + return lastRequest +} + +func canNotRetry(repeatable bool, statusCode int) bool { + if !repeatable || (statusCode >= 400 && statusCode < 500) || statusCode == 304 { + return true + } + return false +} + +func isRedirectErr(location string, redirectCount, maxRedirectCount int) bool { + if location != "" && redirectCount < maxRedirectCount { + return true + } + return false +} + +func setRedirectFlag(statusCode int, method string) (redirectFlag bool) { + if statusCode == 302 && method == HTTP_GET { + redirectFlag = true + } else { + redirectFlag = false + } + return +} + +func prepareRetry(resp *http.Response, headers map[string][]string, _data io.Reader, msg interface{}) (io.Reader, *http.Response, error) { + if resp != nil { + _err := resp.Body.Close() + checkAndLogErr(_err, LEVEL_WARN, "Failed to close resp body") + resp = nil + } + if _, ok := headers[HEADER_AUTH_CAMEL]; ok { + delete(headers, HEADER_AUTH_CAMEL) + } + doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg) + if r, ok := _data.(*strings.Reader); ok { + _, err := r.Seek(0, 0) + if err != nil { + return nil, nil, err + } + } else if r, ok := _data.(*bytes.Reader); ok { + _, err := r.Seek(0, 0) + if err != nil { + return nil, nil, err + } + } else if r, ok := _data.(*fileReaderWrapper); ok { + fd, err := os.Open(r.filePath) + if err != nil { + return nil, nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath} + fileReaderWrapper.mark = r.mark + fileReaderWrapper.reader = fd + fileReaderWrapper.totalCount = r.totalCount + _data = fileReaderWrapper + _, err = 
fd.Seek(r.mark, 0) + if err != nil { + errMsg := fd.Close() + checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg) + return nil, nil, err + } + } else if r, ok := _data.(*readerWrapper); ok { + _, err := r.seek(0, 0) + if err != nil { + return nil, nil, err + } + } + return _data, resp, nil +} + +func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) { + + bucketName = strings.TrimSpace(bucketName) + + method = strings.ToUpper(method) + + var redirectURL string + var requestURL string + maxRetryCount := obsClient.conf.maxRetryCount + maxRedirectCount := obsClient.conf.maxRedirectCount + + _data, _err := prepareData(headers, data) + if _err != nil { + return nil, _err + } + + var lastRequest *http.Request + redirectFlag := false + for i, redirectCount := 0, 0; i <= maxRetryCount; i++ { + req, err := obsClient.getRequest(redirectURL, requestURL, redirectFlag, _data, + method, bucketName, objectKey, params, headers) + if err != nil { + return nil, err + } + + logHeaders(headers, obsClient.conf.signature) + + lastRequest = prepareReq(headers, req, lastRequest, obsClient.conf.userAgent) + + start := GetCurrentTimestamp() + resp, err = obsClient.httpClient.Do(req) + doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start)) + + var msg interface{} + if err != nil { + msg = err + respError = err + resp = nil + if !repeatable { + break + } + } else { + doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header) + if resp.StatusCode < 300 { + respError = nil + break + } else if canNotRetry(repeatable, resp.StatusCode) { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + resp = nil + break + } else if resp.StatusCode >= 300 && resp.StatusCode < 400 { + location := resp.Header.Get(HEADER_LOCATION_CAMEL) + if isRedirectErr(location, redirectCount, maxRedirectCount) { + redirectURL = location + doLog(LEVEL_WARN, "Redirect request to %s", redirectURL) + msg = resp.Status + maxRetryCount++ + redirectCount++ + redirectFlag = setRedirectFlag(resp.StatusCode, method) + } else { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + resp = nil + break + } + } else { + msg = resp.Status + } + } + if i != maxRetryCount { + _data, resp, err = prepareRetry(resp, headers, _data, msg) + if err != nil { + return nil, err + } + if r, ok := _data.(*fileReaderWrapper); ok { + if _fd, _ok := r.reader.(*os.File); _ok { + defer func() { + errMsg := _fd.Close() + checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg) + }() + } + } + time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second))) + } else { + doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg) + if resp != nil { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + resp = nil + } + } + } + return +} + +type connDelegate struct { + conn net.Conn + socketTimeout time.Duration + finalTimeout time.Duration +} + +func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate { + return &connDelegate{ + conn: conn, + socketTimeout: time.Second * time.Duration(socketTimeout), + finalTimeout: time.Second * time.Duration(finalTimeout), + } +} + +func (delegate *connDelegate) Read(b []byte) (n int, err error) { + setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout)) + 
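+	// The socket timeout set just above bounds only this single Read call;
+	// once the read returns, the longer finalTimeout is re-applied below so
+	// that an idle connection parked in the pool still expires eventually.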
flag := isDebugLogEnabled() + + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + + n, err = delegate.conn.Read(b) + setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout)) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Write(b []byte) (n int, err error) { + setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout)) + flag := isDebugLogEnabled() + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + + n, err = delegate.conn.Write(b) + finalTimeout := time.Now().Add(delegate.finalTimeout) + setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout) + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Close() error { + return delegate.conn.Close() +} + +func (delegate *connDelegate) LocalAddr() net.Addr { + return delegate.conn.LocalAddr() +} + +func (delegate *connDelegate) RemoteAddr() net.Addr { + return delegate.conn.RemoteAddr() +} + +func (delegate *connDelegate) SetDeadline(t time.Time) error { + return delegate.conn.SetDeadline(t) +} + +func (delegate *connDelegate) SetReadDeadline(t time.Time) error { + return delegate.conn.SetReadDeadline(t) +} + +func (delegate *connDelegate) SetWriteDeadline(t time.Time) error { + return delegate.conn.SetWriteDeadline(t) +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go new file mode 100644 index 0000000..781ac27 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go @@ -0,0 +1,330 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
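+// Typical use of this logger (an illustrative sketch, not part of the
+// upstream file; the path and sizes below are placeholders): initialize it
+// once, log through the exported helpers, and close it on shutdown so the
+// buffered queue is flushed to disk.
+//
+//	if err := InitLog("/tmp/obs-sdk.log", 10*1024*1024, 5, LEVEL_INFO, false); err != nil {
+//		// proceed without SDK file logging
+//	}
+//	DoLog(LEVEL_INFO, "request finished in %d ms", 42)
+//	CloseLog() // flushes cached lines and closes the log file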
+
+package obs
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// Level defines the level of the log
+type Level int
+
+const (
+	LEVEL_OFF   Level = 500
+	LEVEL_ERROR Level = 400
+	LEVEL_WARN  Level = 300
+	LEVEL_INFO  Level = 200
+	LEVEL_DEBUG Level = 100
+)
+
+var logLevelMap = map[Level]string{
+	LEVEL_OFF:   "[OFF]: ",
+	LEVEL_ERROR: "[ERROR]: ",
+	LEVEL_WARN:  "[WARN]: ",
+	LEVEL_INFO:  "[INFO]: ",
+	LEVEL_DEBUG: "[DEBUG]: ",
+}
+
+type logConfType struct {
+	level        Level
+	logToConsole bool
+	logFullPath  string
+	maxLogSize   int64
+	backups      int
+}
+
+func getDefaultLogConf() logConfType {
+	return logConfType{
+		level:        LEVEL_WARN,
+		logToConsole: false,
+		logFullPath:  "",
+		maxLogSize:   1024 * 1024 * 30, //30MB
+		backups:      10,
+	}
+}
+
+var logConf logConfType
+
+type loggerWrapper struct {
+	fullPath   string
+	fd         *os.File
+	ch         chan string
+	wg         sync.WaitGroup
+	queue      []string
+	logger     *log.Logger
+	index      int
+	cacheCount int
+	closed     bool
+}
+
+func (lw *loggerWrapper) doInit() {
+	lw.queue = make([]string, 0, lw.cacheCount)
+	lw.logger = log.New(lw.fd, "", 0)
+	lw.ch = make(chan string, lw.cacheCount)
+	lw.wg.Add(1)
+	go lw.doWrite()
+}
+
+func (lw *loggerWrapper) rotate() {
+	stat, err := lw.fd.Stat()
+	if err != nil {
+		_err := lw.fd.Close()
+		if _err != nil {
+			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+		}
+		panic(err)
+	}
+	if stat.Size() >= logConf.maxLogSize {
+		_err := lw.fd.Sync()
+		if _err != nil {
+			panic(_err)
+		}
+		_err = lw.fd.Close()
+		if _err != nil {
+			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+		}
+		if lw.index > logConf.backups {
+			lw.index = 1
+		}
+		_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
+		if _err != nil {
+			panic(_err)
+		}
+		lw.index++
+
+		fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+		if err != nil {
+			panic(err)
+		}
+		lw.fd = fd
+		lw.logger.SetOutput(lw.fd)
+	}
+}
+
+func (lw *loggerWrapper) doFlush() {
+	lw.rotate()
+	for _, m := range lw.queue {
+		lw.logger.Println(m)
+	}
+	err := lw.fd.Sync()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (lw *loggerWrapper) doClose() {
+	lw.closed = true
+	close(lw.ch)
+	lw.wg.Wait()
+}
+
+func (lw *loggerWrapper) doWrite() {
+	defer lw.wg.Done()
+	for {
+		msg, ok := <-lw.ch
+		if !ok {
+			lw.doFlush()
+			_err := lw.fd.Close()
+			if _err != nil {
+				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+			}
+			break
+		}
+		if len(lw.queue) >= lw.cacheCount {
+			lw.doFlush()
+			lw.queue = make([]string, 0, lw.cacheCount)
+		}
+		lw.queue = append(lw.queue, msg)
+	}
+}
+
+func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
+	if !lw.closed {
+		msg := fmt.Sprintf(format, v...)
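+		// Hand the formatted line to the writer goroutine. The channel is
+		// buffered with cacheCount entries, so once the buffer fills this
+		// send blocks, applying back-pressure instead of dropping messages.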
+ lw.ch <- msg + } +} + +var consoleLogger *log.Logger +var fileLogger *loggerWrapper +var lock = new(sync.RWMutex) + +func isDebugLogEnabled() bool { + return logConf.level <= LEVEL_DEBUG +} + +func isErrorLogEnabled() bool { + return logConf.level <= LEVEL_ERROR +} + +func isWarnLogEnabled() bool { + return logConf.level <= LEVEL_WARN +} + +func isInfoLogEnabled() bool { + return logConf.level <= LEVEL_INFO +} + +func reset() { + if fileLogger != nil { + fileLogger.doClose() + fileLogger = nil + } + consoleLogger = nil + logConf = getDefaultLogConf() +} + +// InitLog enable logging function with default cacheCnt +func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error { + return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50) +} + +// InitLogWithCacheCnt enable logging function +func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error { + lock.Lock() + defer lock.Unlock() + if cacheCnt <= 0 { + cacheCnt = 50 + } + reset() + if fullPath := strings.TrimSpace(logFullPath); fullPath != "" { + _fullPath, err := filepath.Abs(fullPath) + if err != nil { + return err + } + + if !strings.HasSuffix(_fullPath, ".log") { + _fullPath += ".log" + } + + stat, fd, err := initLogFile(_fullPath) + if err != nil { + return err + } + + prefix := stat.Name() + "." + index := 1 + var timeIndex int64 = 0 + walkFunc := func(path string, info os.FileInfo, err error) error { + if err == nil { + if name := info.Name(); strings.HasPrefix(name, prefix) { + if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex { + timeIndex = info.ModTime().Unix() + index = i + 1 + } + } + } + return err + } + + if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return err + } + + fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false} + fileLogger.doInit() + } + if maxLogSize > 0 { + logConf.maxLogSize = maxLogSize + } + if backups > 0 { + logConf.backups = backups + } + logConf.level = level + if logToConsole { + consoleLogger = log.New(os.Stdout, "", log.LstdFlags) + } + return nil +} + +func initLogFile(_fullPath string) (os.FileInfo, *os.File, error) { + stat, err := os.Stat(_fullPath) + if err == nil && stat.IsDir() { + return nil, nil, fmt.Errorf("logFullPath:[%s] is a directory", _fullPath) + } else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil { + return nil, nil, err + } + + fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return nil, nil, err + } + + if stat == nil { + stat, err = os.Stat(_fullPath) + if err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return nil, nil, err + } + } + return stat, fd, nil +} + +// CloseLog disable logging and synchronize cache data to log files +func CloseLog() { + if logEnabled() { + lock.Lock() + defer lock.Unlock() + reset() + } +} + +func logEnabled() bool { + return consoleLogger != nil || fileLogger != nil +} + +// DoLog writes log messages to the logger +func DoLog(level Level, format string, v ...interface{}) { + doLog(level, format, v...) 
+} + +func doLog(level Level, format string, v ...interface{}) { + if logEnabled() && logConf.level <= level { + msg := fmt.Sprintf(format, v...) + if _, file, line, ok := runtime.Caller(1); ok { + index := strings.LastIndex(file, "/") + if index >= 0 { + file = file[index+1:] + } + msg = fmt.Sprintf("%s:%d|%s", file, line, msg) + } + prefix := logLevelMap[level] + if consoleLogger != nil { + consoleLogger.Printf("%s%s", prefix, msg) + } + if fileLogger != nil { + nowDate := FormatUtcNow("2006-01-02T15:04:05Z") + fileLogger.Printf("%s %s%s", nowDate, prefix, msg) + } + } +} + +func checkAndLogErr(err error, level Level, format string, v ...interface{}) { + if err != nil { + doLog(level, format, v...) + } +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go new file mode 100644 index 0000000..c210a33 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go @@ -0,0 +1,363 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +package obs + +import ( + "encoding/xml" + "time" +) + +// Bucket defines bucket properties +type Bucket struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` + Location string `xml:"Location"` + BucketType string `xml:"BucketType,omitempty"` +} + +// Owner defines owner properties +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// Initiator defines initiator properties +type Initiator struct { + XMLName xml.Name `xml:"Initiator"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +type bucketLocationObs struct { + XMLName xml.Name `xml:"Location"` + Location string `xml:",chardata"` +} + +// BucketLocation defines bucket location configuration +type BucketLocation struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + Location string `xml:"LocationConstraint,omitempty"` +} + +// BucketStoragePolicy defines the bucket storage class +type BucketStoragePolicy struct { + XMLName xml.Name `xml:"StoragePolicy"` + StorageClass StorageClassType `xml:"DefaultStorageClass"` +} + +type bucketStoragePolicyObs struct { + XMLName xml.Name `xml:"StorageClass"` + StorageClass string `xml:",chardata"` +} + +// Content defines the object content properties +type Content struct { + XMLName xml.Name `xml:"Contents"` + Owner Owner `xml:"Owner"` + ETag string `xml:"ETag"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + Size int64 `xml:"Size"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// Version defines the properties of versioning objects +type Version struct { + DeleteMarker + XMLName xml.Name `xml:"Version"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` +} + +// DeleteMarker defines the properties of versioning delete markers +type DeleteMarker struct { + XMLName xml.Name 
`xml:"DeleteMarker"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + IsLatest bool `xml:"IsLatest"` + LastModified time.Time `xml:"LastModified"` + Owner Owner `xml:"Owner"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// Upload defines multipart upload properties +type Upload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + Initiated time.Time `xml:"Initiated"` + StorageClass StorageClassType `xml:"StorageClass"` + Owner Owner `xml:"Owner"` + Initiator Initiator `xml:"Initiator"` +} + +// BucketQuota defines bucket quota configuration +type BucketQuota struct { + XMLName xml.Name `xml:"Quota"` + Quota int64 `xml:"StorageQuota"` +} + +// Grantee defines grantee properties +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI GroupUriType `xml:"URI,omitempty"` +} + +type granteeObs struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + Canned string `xml:"Canned,omitempty"` +} + +// Grant defines grant properties +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} + +type grantObs struct { + XMLName xml.Name `xml:"Grant"` + Grantee granteeObs `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} + +// AccessControlPolicy defines access control policy properties +type AccessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []Grant `xml:"AccessControlList>Grant"` + Delivered string `xml:"Delivered,omitempty"` +} + +type accessControlPolicyObs struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []grantObs `xml:"AccessControlList>Grant"` +} + +// CorsRule defines the CORS rules +type CorsRule struct { + XMLName xml.Name `xml:"CORSRule"` + ID string `xml:"ID,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin"` + AllowedMethod []string `xml:"AllowedMethod"` + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` +} + +// BucketCors defines the bucket CORS configuration +type BucketCors struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CorsRules []CorsRule `xml:"CORSRule"` +} + +// BucketVersioningConfiguration defines the versioning configuration +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status VersioningStatusType `xml:"Status"` +} + +// IndexDocument defines the default page configuration +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +// ErrorDocument defines the error page configuration +type ErrorDocument struct { + Key string `xml:"Key,omitempty"` +} + +// Condition defines condition in RoutingRule +type Condition struct { + XMLName xml.Name `xml:"Condition"` + KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"` + HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"` +} + +// Redirect defines redirect in RoutingRule +type Redirect struct { + XMLName xml.Name `xml:"Redirect"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName,omitempty"` + ReplaceKeyPrefixWith 
string `xml:"ReplaceKeyPrefixWith,omitempty"` + ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` + HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"` +} + +// RoutingRule defines routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + Condition Condition `xml:"Condition,omitempty"` + Redirect Redirect `xml:"Redirect"` +} + +// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration +type RedirectAllRequestsTo struct { + XMLName xml.Name `xml:"RedirectAllRequestsTo"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName"` +} + +// BucketWebsiteConfiguration defines the bucket website configuration +type BucketWebsiteConfiguration struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` +} + +// BucketLoggingStatus defines the bucket logging configuration +type BucketLoggingStatus struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + Agency string `xml:"Agency,omitempty"` + TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"` + TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"` + TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"` +} + +// Transition defines transition property in LifecycleRule +type Transition struct { + XMLName xml.Name `xml:"Transition"` + Date time.Time `xml:"Date,omitempty"` + Days int `xml:"Days,omitempty"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// Expiration defines expiration property in LifecycleRule +type Expiration struct { + XMLName xml.Name `xml:"Expiration"` + Date time.Time `xml:"Date,omitempty"` + Days int `xml:"Days,omitempty"` +} + +// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule +type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition"` + NoncurrentDays int `xml:"NoncurrentDays"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration"` + NoncurrentDays int `xml:"NoncurrentDays"` +} + +// LifecycleRule defines lifecycle rule +type LifecycleRule struct { + ID string `xml:"ID,omitempty"` + Prefix string `xml:"Prefix"` + Status RuleStatusType `xml:"Status"` + Transitions []Transition `xml:"Transition,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty"` + NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` +} + +// BucketEncryptionConfiguration defines the bucket encryption configuration +type BucketEncryptionConfiguration struct { + XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` + SSEAlgorithm string `xml:"Rule>ApplyServerSideEncryptionByDefault>SSEAlgorithm"` + KMSMasterKeyID string `xml:"Rule>ApplyServerSideEncryptionByDefault>KMSMasterKeyID,omitempty"` + ProjectID string `xml:"Rule>ApplyServerSideEncryptionByDefault>ProjectID,omitempty"` +} + +// Tag defines tag property in BucketTagging +type Tag struct { + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string 
`xml:"Value"` +} + +// BucketTagging defines the bucket tag configuration +type BucketTagging struct { + XMLName xml.Name `xml:"Tagging"` + Tags []Tag `xml:"TagSet>Tag"` +} + +// FilterRule defines filter rule in TopicConfiguration +type FilterRule struct { + XMLName xml.Name `xml:"FilterRule"` + Name string `xml:"Name,omitempty"` + Value string `xml:"Value,omitempty"` +} + +// TopicConfiguration defines the topic configuration +type TopicConfiguration struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []EventType `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>Object>FilterRule"` +} + +// BucketNotification defines the bucket notification configuration +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"` +} + +type topicConfigurationS3 struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []string `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"` +} + +type bucketNotificationS3 struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"` +} + +// ObjectToDelete defines the object property in DeleteObjectsInput +type ObjectToDelete struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// Deleted defines the deleted property in DeleteObjectsOutput +type Deleted struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + DeleteMarker bool `xml:"DeleteMarker"` + DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` +} + +// Part defines the part properties +type Part struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified,omitempty"` + Size int64 `xml:"Size,omitempty"` +} + +// BucketPayer defines the request payment configuration +type BucketPayer struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + Payer PayerType `xml:"Payer"` +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go new file mode 100644 index 0000000..a0ba7f9 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go @@ -0,0 +1,356 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+package obs + +import ( + "encoding/xml" +) + +// ListBucketsInput is the input parameter of ListBuckets function +type ListBucketsInput struct { + QueryLocation bool + BucketType BucketType +} + +// ListBucketsOutput is the result of ListBuckets function +type ListBucketsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Owner Owner `xml:"Owner"` + Buckets []Bucket `xml:"Buckets>Bucket"` +} + +// CreateBucketInput is the input parameter of CreateBucket function +type CreateBucketInput struct { + BucketLocation + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + StorageClass StorageClassType `xml:"-"` + GrantReadId string `xml:"-"` + GrantWriteId string `xml:"-"` + GrantReadAcpId string `xml:"-"` + GrantWriteAcpId string `xml:"-"` + GrantFullControlId string `xml:"-"` + GrantReadDeliveredId string `xml:"-"` + GrantFullControlDeliveredId string `xml:"-"` + Epid string `xml:"-"` + AvailableZone string `xml:"-"` + IsFSFileInterface bool `xml:"-"` +} + +// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy function +type SetBucketStoragePolicyInput struct { + Bucket string `xml:"-"` + BucketStoragePolicy +} + +type getBucketStoragePolicyOutputS3 struct { + BaseModel + BucketStoragePolicy +} + +// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function +type GetBucketStoragePolicyOutput struct { + BaseModel + StorageClass string +} + +type getBucketStoragePolicyOutputObs struct { + BaseModel + bucketStoragePolicyObs +} + +// SetBucketQuotaInput is the input parameter of SetBucketQuota function +type SetBucketQuotaInput struct { + Bucket string `xml:"-"` + BucketQuota +} + +// GetBucketQuotaOutput is the result of GetBucketQuota function +type GetBucketQuotaOutput struct { + BaseModel + BucketQuota +} + +// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function +type GetBucketStorageInfoOutput struct { + BaseModel + XMLName xml.Name `xml:"GetBucketStorageInfoResult"` + Size int64 `xml:"Size"` + ObjectNumber int `xml:"ObjectNumber"` +} + +type getBucketLocationOutputS3 struct { + BaseModel + BucketLocation +} +type getBucketLocationOutputObs struct { + BaseModel + bucketLocationObs +} + +// GetBucketLocationOutput is the result of GetBucketLocation function +type GetBucketLocationOutput struct { + BaseModel + Location string `xml:"-"` +} + +// GetBucketAclOutput is the result of GetBucketAcl function +type GetBucketAclOutput struct { + BaseModel + AccessControlPolicy +} + +type getBucketACLOutputObs struct { + BaseModel + accessControlPolicyObs +} + +// SetBucketAclInput is the input parameter of SetBucketAcl function +type SetBucketAclInput struct { + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// SetBucketPolicyInput is the input parameter of SetBucketPolicy function +type SetBucketPolicyInput struct { + Bucket string + Policy string +} + +// GetBucketPolicyOutput is the result of GetBucketPolicy function +type GetBucketPolicyOutput struct { + BaseModel + Policy string `json:"body"` +} + +// SetBucketCorsInput is the input parameter of SetBucketCors function +type SetBucketCorsInput struct { + Bucket string `xml:"-"` + BucketCors +} + +// GetBucketCorsOutput is the result of GetBucketCors function +type GetBucketCorsOutput struct { + BaseModel + BucketCors +} + +// SetBucketVersioningInput is the input parameter of SetBucketVersioning function +type SetBucketVersioningInput struct { + Bucket string `xml:"-"` + BucketVersioningConfiguration +} + +// 
GetBucketVersioningOutput is the result of GetBucketVersioning function +type GetBucketVersioningOutput struct { + BaseModel + BucketVersioningConfiguration +} + +// SetBucketWebsiteConfigurationInput is the input parameter of SetBucketWebsiteConfiguration function +type SetBucketWebsiteConfigurationInput struct { + Bucket string `xml:"-"` + BucketWebsiteConfiguration +} + +// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function +type GetBucketWebsiteConfigurationOutput struct { + BaseModel + BucketWebsiteConfiguration +} + +// GetBucketMetadataInput is the input parameter of GetBucketMetadata function +type GetBucketMetadataInput struct { + Bucket string + Origin string + RequestHeader string +} + +// SetObjectMetadataInput is the input parameter of SetObjectMetadata function +type SetObjectMetadataInput struct { + Bucket string + Key string + VersionId string + MetadataDirective MetadataDirectiveType + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + WebsiteRedirectLocation string + StorageClass StorageClassType + Metadata map[string]string +} + +//SetObjectMetadataOutput is the result of SetObjectMetadata function +type SetObjectMetadataOutput struct { + BaseModel + MetadataDirective MetadataDirectiveType + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + WebsiteRedirectLocation string + StorageClass StorageClassType + Metadata map[string]string +} + +// GetBucketMetadataOutput is the result of GetBucketMetadata function +type GetBucketMetadataOutput struct { + BaseModel + StorageClass StorageClassType + Location string + Version string + AllowOrigin string + AllowMethod string + AllowHeader string + MaxAgeSeconds int + ExposeHeader string + Epid string + AZRedundancy string + FSStatus FSStatusType +} + +// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function +type SetBucketLoggingConfigurationInput struct { + Bucket string `xml:"-"` + BucketLoggingStatus +} + +// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function +type GetBucketLoggingConfigurationOutput struct { + BaseModel + BucketLoggingStatus +} + +// BucketLifecyleConfiguration defines the bucket lifecycle configuration +type BucketLifecyleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + LifecycleRules []LifecycleRule `xml:"Rule"` +} + +// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function +type SetBucketLifecycleConfigurationInput struct { + Bucket string `xml:"-"` + BucketLifecyleConfiguration +} + +// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function +type GetBucketLifecycleConfigurationOutput struct { + BaseModel + BucketLifecyleConfiguration +} + +// SetBucketEncryptionInput is the input parameter of SetBucketEncryption function +type SetBucketEncryptionInput struct { + Bucket string `xml:"-"` + BucketEncryptionConfiguration +} + +// GetBucketEncryptionOutput is the result of GetBucketEncryption function +type GetBucketEncryptionOutput struct { + BaseModel + BucketEncryptionConfiguration +} + +// SetBucketTaggingInput is the input parameter of SetBucketTagging function +type SetBucketTaggingInput struct { + Bucket string `xml:"-"` + BucketTagging +} + +// GetBucketTaggingOutput is the result of 
GetBucketTagging function +type GetBucketTaggingOutput struct { + BaseModel + BucketTagging +} + +// SetBucketNotificationInput is the input parameter of SetBucketNotification function +type SetBucketNotificationInput struct { + Bucket string `xml:"-"` + BucketNotification +} + +type getBucketNotificationOutputS3 struct { + BaseModel + bucketNotificationS3 +} + +// GetBucketNotificationOutput is the result of GetBucketNotification function +type GetBucketNotificationOutput struct { + BaseModel + BucketNotification +} + +// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function +type SetBucketFetchPolicyInput struct { + Bucket string + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function +type GetBucketFetchPolicyInput struct { + Bucket string +} + +// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function +type GetBucketFetchPolicyOutput struct { + BaseModel + FetchResponse `json:"fetch"` +} + +// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function +type DeleteBucketFetchPolicyInput struct { + Bucket string +} + +// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function +type SetBucketFetchJobInput struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host,omitempty"` + Key string `json:"key,omitempty"` + Md5 string `json:"md5,omitempty"` + CallBackURL string `json:"callbackurl,omitempty"` + CallBackBody string `json:"callbackbody,omitempty"` + CallBackBodyType string `json:"callbackbodytype,omitempty"` + CallBackHost string `json:"callbackhost,omitempty"` + FileType string `json:"file_type,omitempty"` + IgnoreSameKey bool `json:"ignore_same_key,omitempty"` + ObjectHeaders map[string]string `json:"objectheaders,omitempty"` + Etag string `json:"etag,omitempty"` + TrustName string `json:"trustname,omitempty"` +} + +// SetBucketFetchJobOutput is the result of SetBucketFetchJob function +type SetBucketFetchJobOutput struct { + BaseModel + SetBucketFetchJobResponse +} + +// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function +type GetBucketFetchJobInput struct { + Bucket string + JobID string +} + +// GetBucketFetchJobOutput is the result of GetBucketFetchJob function +type GetBucketFetchJobOutput struct { + BaseModel + GetBucketFetchJobResponse +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go new file mode 100644 index 0000000..c76d9f0 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go @@ -0,0 +1,32 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
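+
+// Note: the sketch below is an illustrative example added for clarity and is
+// not part of the upstream SDK source. It shows how the SSE header types
+// defined in this file are typically filled in; the key values and the
+// "AES256"/"kms" algorithm strings are assumptions, not confirmed constants:
+//
+//	// SSE-C: the caller supplies a base64-encoded 256-bit key and its MD5
+//	var sse ISseHeader = SseCHeader{Encryption: "AES256", Key: b64Key, KeyMD5: b64KeyMD5}
+//
+//	// SSE-KMS: the server encrypts with a KMS master key (key id is hypothetical)
+//	sse = SseKmsHeader{Encryption: "kms", Key: "kms-key-id"}
+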
+package obs + +// ISseHeader defines the sse encryption header +type ISseHeader interface { + GetEncryption() string + GetKey() string +} + +// SseKmsHeader defines the SseKms header +type SseKmsHeader struct { + Encryption string + Key string + isObs bool +} + +// SseCHeader defines the SseC header +type SseCHeader struct { + Encryption string + Key string + KeyMD5 string +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go new file mode 100644 index 0000000..b599e89 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go @@ -0,0 +1,345 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. +package obs + +import ( + "encoding/xml" + "io" + "time" +) + +// ListObjsInput defines parameters for listing objects +type ListObjsInput struct { + Prefix string + MaxKeys int + Delimiter string + Origin string + RequestHeader string + EncodingType string +} + +// ListObjectsInput is the input parameter of ListObjects function +type ListObjectsInput struct { + ListObjsInput + Bucket string + Marker string +} + +// ListObjectsOutput is the result of ListObjects function +type ListObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListBucketResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Contents []Content `xml:"Contents"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// ListVersionsInput is the input parameter of ListVersions function +type ListVersionsInput struct { + ListObjsInput + Bucket string + KeyMarker string + VersionIdMarker string +} + +// ListVersionsOutput is the result of ListVersions function +type ListVersionsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListVersionsResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + VersionIdMarker string `xml:"VersionIdMarker"` + NextVersionIdMarker string `xml:"NextVersionIdMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Versions []Version `xml:"Version"` + DeleteMarkers []DeleteMarker `xml:"DeleteMarker"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// DeleteObjectInput is the input parameter of DeleteObject function +type DeleteObjectInput struct { + Bucket string + Key string + VersionId string +} + +// DeleteObjectOutput is the result of DeleteObject function +type DeleteObjectOutput struct { + BaseModel + VersionId string + DeleteMarker bool +} + +// DeleteObjectsInput is the input 
parameter of DeleteObjects function +type DeleteObjectsInput struct { + Bucket string `xml:"-"` + XMLName xml.Name `xml:"Delete"` + Quiet bool `xml:"Quiet,omitempty"` + Objects []ObjectToDelete `xml:"Object"` + EncodingType string `xml:"EncodingType"` +} + +// DeleteObjectsOutput is the result of DeleteObjects function +type DeleteObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"DeleteResult"` + Deleteds []Deleted `xml:"Deleted"` + Errors []Error `xml:"Error"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// SetObjectAclInput is the input parameter of SetObjectAcl function +type SetObjectAclInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// GetObjectAclInput is the input parameter of GetObjectAcl function +type GetObjectAclInput struct { + Bucket string + Key string + VersionId string +} + +// GetObjectAclOutput is the result of GetObjectAcl function +type GetObjectAclOutput struct { + BaseModel + VersionId string + AccessControlPolicy +} + +// RestoreObjectInput is the input parameter of RestoreObject function +type RestoreObjectInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + XMLName xml.Name `xml:"RestoreRequest"` + Days int `xml:"Days"` + Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"` +} + +// GetObjectMetadataInput is the input parameter of GetObjectMetadata function +type GetObjectMetadataInput struct { + Bucket string + Key string + VersionId string + Origin string + RequestHeader string + SseHeader ISseHeader +} + +// GetObjectMetadataOutput is the result of GetObjectMetadata function +type GetObjectMetadataOutput struct { + BaseModel + VersionId string + WebsiteRedirectLocation string + Expiration string + Restore string + ObjectType string + NextAppendPosition string + StorageClass StorageClassType + ContentLength int64 + ContentType string + ETag string + AllowOrigin string + AllowHeader string + AllowMethod string + ExposeHeader string + MaxAgeSeconds int + LastModified time.Time + SseHeader ISseHeader + Metadata map[string]string +} + +// GetObjectInput is the input parameter of GetObject function +type GetObjectInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfUnmodifiedSince time.Time + IfModifiedSince time.Time + RangeStart int64 + RangeEnd int64 + ImageProcess string + ResponseCacheControl string + ResponseContentDisposition string + ResponseContentEncoding string + ResponseContentLanguage string + ResponseContentType string + ResponseExpires string +} + +// GetObjectOutput is the result of GetObject function +type GetObjectOutput struct { + GetObjectMetadataOutput + DeleteMarker bool + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + Expires string + Body io.ReadCloser +} + +// ObjectOperationInput defines the object operation properties +type ObjectOperationInput struct { + Bucket string + Key string + ACL AclType + GrantReadId string + GrantReadAcpId string + GrantWriteAcpId string + GrantFullControlId string + StorageClass StorageClassType + WebsiteRedirectLocation string + Expires int64 + SseHeader ISseHeader + Metadata map[string]string +} + +// PutObjectBasicInput defines the basic object operation properties +type PutObjectBasicInput struct { + ObjectOperationInput + ContentType string + ContentMD5 string + ContentLength int64 +} + +// PutObjectInput is the input parameter of PutObject function +type 
PutObjectInput struct { + PutObjectBasicInput + Body io.Reader +} + +// PutFileInput is the input parameter of PutFile function +type PutFileInput struct { + PutObjectBasicInput + SourceFile string +} + +// PutObjectOutput is the result of PutObject function +type PutObjectOutput struct { + BaseModel + VersionId string + SseHeader ISseHeader + StorageClass StorageClassType + ETag string +} + +// CopyObjectInput is the input parameter of CopyObject function +type CopyObjectInput struct { + ObjectOperationInput + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceIfMatch string + CopySourceIfNoneMatch string + CopySourceIfUnmodifiedSince time.Time + CopySourceIfModifiedSince time.Time + SourceSseHeader ISseHeader + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + MetadataDirective MetadataDirectiveType + SuccessActionRedirect string +} + +// CopyObjectOutput is the result of CopyObject function +type CopyObjectOutput struct { + BaseModel + CopySourceVersionId string `xml:"-"` + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +// UploadFileInput is the input parameter of UploadFile function +type UploadFileInput struct { + ObjectOperationInput + ContentType string + UploadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string + EncodingType string +} + +// DownloadFileInput is the input parameter of DownloadFile function +type DownloadFileInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfModifiedSince time.Time + IfUnmodifiedSince time.Time + DownloadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string +} + +type AppendObjectInput struct { + PutObjectBasicInput + Body io.Reader + Position int64 +} + +type AppendObjectOutput struct { + BaseModel + VersionId string + SseHeader ISseHeader + NextAppendPosition int64 + ETag string +} + +type ModifyObjectInput struct { + Bucket string + Key string + Position int64 + Body io.Reader + ContentLength int64 +} + +type ModifyObjectOutput struct { + BaseModel + ETag string +} + +// HeadObjectInput is the input parameter of HeadObject function +type HeadObjectInput struct { + Bucket string + Key string + VersionId string +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go new file mode 100644 index 0000000..8ede31d --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go @@ -0,0 +1,63 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
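+
+// Note: an illustrative sketch (not part of the upstream source) of how the
+// signed-url types below are used, assuming an initialized *ObsClient named
+// client; CreateSignedUrl itself is defined in temporary_createSignedUrl.go,
+// and HttpMethodGet is assumed to be one of the SDK's HttpMethodType constants:
+//
+//	input := &CreateSignedUrlInput{
+//		Method:  HttpMethodGet,
+//		Bucket:  "bucket-name",
+//		Key:     "object-key",
+//		Expires: 3600, // seconds; values <= 0 fall back to 300
+//	}
+//	output, err := client.CreateSignedUrl(input)
+//	if err == nil {
+//		fmt.Println(output.SignedUrl)
+//	}
+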
+package obs + +import ( + "net/http" +) + +// CreateSignedUrlInput is the input parameter of CreateSignedUrl function +type CreateSignedUrlInput struct { + Method HttpMethodType + Bucket string + Key string + SubResource SubResourceType + Expires int + Headers map[string]string + QueryParams map[string]string +} + +// CreateSignedUrlOutput is the result of CreateSignedUrl function +type CreateSignedUrlOutput struct { + SignedUrl string + ActualSignedRequestHeaders http.Header +} + +// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function. +type CreateBrowserBasedSignatureInput struct { + Bucket string + Key string + Expires int + FormParams map[string]string +} + +// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function. +type CreateBrowserBasedSignatureOutput struct { + OriginPolicy string + Policy string + Algorithm string + Credential string + Date string + Signature string +} + +// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function +type SetBucketRequestPaymentInput struct { + Bucket string `xml:"-"` + BucketPayer +} + +// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function +type GetBucketRequestPaymentOutput struct { + BaseModel + BucketPayer +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go new file mode 100644 index 0000000..a6752b8 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go @@ -0,0 +1,170 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
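+
+// Note: an illustrative sketch (not part of the upstream source) of the
+// multipart flow the types below describe, assuming an initialized *ObsClient
+// named client and the SDK's InitiateMultipartUpload/UploadPart/
+// CompleteMultipartUpload methods:
+//
+//	initOut, _ := client.InitiateMultipartUpload(&InitiateMultipartUploadInput{
+//		ObjectOperationInput: ObjectOperationInput{Bucket: "bucket-name", Key: "object-key"},
+//	})
+//	partOut, _ := client.UploadPart(&UploadPartInput{
+//		Bucket: "bucket-name", Key: "object-key", UploadId: initOut.UploadId,
+//		PartNumber: 1, Body: strings.NewReader("hello"),
+//	})
+//	client.CompleteMultipartUpload(&CompleteMultipartUploadInput{
+//		Bucket: "bucket-name", Key: "object-key", UploadId: initOut.UploadId,
+//		Parts: []Part{{PartNumber: partOut.PartNumber, ETag: partOut.ETag}},
+//	})
+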
+package obs + +import ( + "encoding/xml" + "io" + "time" +) + +// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function +type ListMultipartUploadsInput struct { + Bucket string + Prefix string + MaxUploads int + Delimiter string + KeyMarker string + UploadIdMarker string + EncodingType string +} + +// ListMultipartUploadsOutput is the result of ListMultipartUploads function +type ListMultipartUploadsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + UploadIdMarker string `xml:"UploadIdMarker"` + NextUploadIdMarker string `xml:"NextUploadIdMarker"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + MaxUploads int `xml:"MaxUploads"` + Prefix string `xml:"Prefix"` + Uploads []Upload `xml:"Upload"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function +type AbortMultipartUploadInput struct { + Bucket string + Key string + UploadId string +} + +// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function +type InitiateMultipartUploadInput struct { + ObjectOperationInput + ContentType string + EncodingType string +} + +// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function +type InitiateMultipartUploadOutput struct { + BaseModel + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + SseHeader ISseHeader + EncodingType string `xml:"EncodingType,omitempty"` +} + +// UploadPartInput is the input parameter of UploadPart function +type UploadPartInput struct { + Bucket string + Key string + PartNumber int + UploadId string + ContentMD5 string + SseHeader ISseHeader + Body io.Reader + SourceFile string + Offset int64 + PartSize int64 +} + +// UploadPartOutput is the result of UploadPart function +type UploadPartOutput struct { + BaseModel + PartNumber int + ETag string + SseHeader ISseHeader +} + +// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function +type CompleteMultipartUploadInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + UploadId string `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts []Part `xml:"Part"` + EncodingType string `xml:"-"` +} + +// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function +type CompleteMultipartUploadOutput struct { + BaseModel + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + ETag string `xml:"ETag"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// ListPartsInput is the input parameter of ListParts function +type ListPartsInput struct { + Bucket string + Key string + UploadId string + MaxParts int + PartNumberMarker int + EncodingType string +} + +// ListPartsOutput is the result of ListParts function +type ListPartsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + PartNumberMarker int `xml:"PartNumberMarker"` + NextPartNumberMarker int `xml:"NextPartNumberMarker"` + MaxParts int `xml:"MaxParts"` 
+ IsTruncated bool `xml:"IsTruncated"` + StorageClass StorageClassType `xml:"StorageClass"` + Initiator Initiator `xml:"Initiator"` + Owner Owner `xml:"Owner"` + Parts []Part `xml:"Part"` + EncodingType string `xml:"EncodingType,omitempty"` +} + +// CopyPartInput is the input parameter of CopyPart function +type CopyPartInput struct { + Bucket string + Key string + UploadId string + PartNumber int + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceRangeStart int64 + CopySourceRangeEnd int64 + SseHeader ISseHeader + SourceSseHeader ISseHeader +} + +// CopyPartOutput is the result of CopyPart function +type CopyPartOutput struct { + BaseModel + XMLName xml.Name `xml:"CopyPartResult"` + PartNumber int `xml:"-"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified"` + SseHeader ISseHeader `xml:"-"` +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go new file mode 100644 index 0000000..a8db53e --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go @@ -0,0 +1,67 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
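+
+// Note: an illustrative sketch (not part of the upstream source). Every output
+// struct in this package embeds BaseModel (defined below), so transport-level
+// details can be read uniformly from any response; out here stands for any
+// SDK output value, e.g. a *PutObjectOutput:
+//
+//	fmt.Printf("status=%d requestId=%s\n", out.StatusCode, out.RequestId)
+//	for k, v := range out.ResponseHeaders {
+//		fmt.Println(k, v) // response headers captured outside the XML body
+//	}
+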
+package obs + +import ( + "encoding/xml" +) + +// BaseModel defines base model response from OBS +type BaseModel struct { + StatusCode int `xml:"-"` + RequestId string `xml:"RequestId" json:"request_id"` + ResponseHeaders map[string][]string `xml:"-"` +} + +// Error defines the error property in DeleteObjectsOutput +type Error struct { + XMLName xml.Name `xml:"Error"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +// FetchResponse defines the response fetch policy configuration +type FetchResponse struct { + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration +type SetBucketFetchJobResponse struct { + ID string `json:"id"` + Wait int `json:"Wait"` +} + +// GetBucketFetchJobResponse defines the response fetch job configuration +type GetBucketFetchJobResponse struct { + Err string `json:"err"` + Code string `json:"code"` + Status string `json:"status"` + Job JobResponse `json:"job"` +} + +// JobResponse defines the response job configuration +type JobResponse struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host"` + Key string `json:"key"` + Md5 string `json:"md5"` + CallBackURL string `json:"callbackurl"` + CallBackBody string `json:"callbackbody"` + CallBackBodyType string `json:"callbackbodytype"` + CallBackHost string `json:"callbackhost"` + FileType string `json:"file_type"` + IgnoreSameKey bool `json:"ignore_same_key"` +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go new file mode 100644 index 0000000..1755c96 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go @@ -0,0 +1,542 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
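+
+// Note: an illustrative sketch (not part of the upstream source) of the pool
+// API defined in this file: submit a function as a task, block on its Future,
+// then shut the pool down.
+//
+//	pool := NewRoutinePool(4, 16) // up to 4 workers, task queue buffered to 16
+//	future, err := pool.SubmitFunc(func() interface{} { return 42 })
+//	if err == nil {
+//		fmt.Println(future.Get()) // Get blocks until the task completes
+//	}
+//	pool.ShutDown()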
+ +//nolint:structcheck, unused +package obs + +import ( + "errors" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" +) + +// Future defines interface with function: Get +type Future interface { + Get() interface{} +} + +// FutureResult for task result +type FutureResult struct { + result interface{} + resultChan chan interface{} + lock sync.Mutex +} + +type panicResult struct { + presult interface{} +} + +func (f *FutureResult) checkPanic() interface{} { + if r, ok := f.result.(panicResult); ok { + panic(r.presult) + } + return f.result +} + +// Get gets the task result +func (f *FutureResult) Get() interface{} { + if f.resultChan == nil { + return f.checkPanic() + } + f.lock.Lock() + defer f.lock.Unlock() + if f.resultChan == nil { + return f.checkPanic() + } + + f.result = <-f.resultChan + close(f.resultChan) + f.resultChan = nil + return f.checkPanic() +} + +// Task defines interface with function: Run +type Task interface { + Run() interface{} +} + +type funcWrapper struct { + f func() interface{} +} + +func (fw *funcWrapper) Run() interface{} { + if fw.f != nil { + return fw.f() + } + return nil +} + +type taskWrapper struct { + t Task + f *FutureResult +} + +func (tw *taskWrapper) Run() interface{} { + if tw.t != nil { + return tw.t.Run() + } + return nil +} + +type signalTask struct { + id string +} + +func (signalTask) Run() interface{} { + return nil +} + +type worker struct { + name string + taskQueue chan Task + wg *sync.WaitGroup + pool *RoutinePool +} + +func runTask(t Task) { + if tw, ok := t.(*taskWrapper); ok { + defer func() { + if r := recover(); r != nil { + tw.f.resultChan <- panicResult{ + presult: r, + } + } + }() + ret := t.Run() + tw.f.resultChan <- ret + } else { + t.Run() + } +} + +func (*worker) runTask(t Task) { + runTask(t) +} + +func (w *worker) start() { + go func() { + defer func() { + if w.wg != nil { + w.wg.Done() + } + }() + for { + task, ok := <-w.taskQueue + if !ok { + break + } + w.pool.AddCurrentWorkingCnt(1) + w.runTask(task) + w.pool.AddCurrentWorkingCnt(-1) + if w.pool.autoTuneWorker(w) { + break + } + } + }() +} + +func (w *worker) release() { + w.taskQueue = nil + w.wg = nil + w.pool = nil +} + +// Pool defines coroutine pool interface +type Pool interface { + ShutDown() + Submit(t Task) (Future, error) + SubmitFunc(f func() interface{}) (Future, error) + Execute(t Task) + ExecuteFunc(f func() interface{}) + GetMaxWorkerCnt() int64 + AddMaxWorkerCnt(value int64) int64 + GetCurrentWorkingCnt() int64 + AddCurrentWorkingCnt(value int64) int64 + GetWorkerCnt() int64 + AddWorkerCnt(value int64) int64 + EnableAutoTune() +} + +type basicPool struct { + maxWorkerCnt int64 + workerCnt int64 + currentWorkingCnt int64 + isShutDown int32 +} + +// ErrTaskInvalid will be returned if the task is nil +var ErrTaskInvalid = errors.New("Task is nil") + +func (pool *basicPool) GetCurrentWorkingCnt() int64 { + return atomic.LoadInt64(&pool.currentWorkingCnt) +} + +func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 { + return atomic.AddInt64(&pool.currentWorkingCnt, value) +} + +func (pool *basicPool) GetWorkerCnt() int64 { + return atomic.LoadInt64(&pool.workerCnt) +} + +func (pool *basicPool) AddWorkerCnt(value int64) int64 { + return atomic.AddInt64(&pool.workerCnt, value) +} + +func (pool *basicPool) GetMaxWorkerCnt() int64 { + return atomic.LoadInt64(&pool.maxWorkerCnt) +} + +func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 { + return atomic.AddInt64(&pool.maxWorkerCnt, value) +} + +func (pool *basicPool) 
CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
+ return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
+}
+
+func (pool *basicPool) EnableAutoTune() {
+
+}
+
+// RoutinePool defines the coroutine pool struct
+type RoutinePool struct {
+ basicPool
+ taskQueue chan Task
+ dispatchQueue chan Task
+ workers map[string]*worker
+ cacheCnt int
+ wg *sync.WaitGroup
+ lock *sync.Mutex
+ shutDownWg *sync.WaitGroup
+ autoTune int32
+}
+
+// ErrSubmitTimeout will be returned if submitting a task times out when calling the SubmitWithTimeout function
+var ErrSubmitTimeout = errors.New("Submit task timeout")
+
+// ErrPoolShutDown will be returned if RoutinePool is shutdown
+var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
+
+// ErrTaskReject will be returned if a submitted task is rejected
+var ErrTaskReject = errors.New("Submit task is rejected")
+
+var closeQueue = signalTask{id: "closeQueue"}
+
+// NewRoutinePool creates a RoutinePool instance
+func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
+ if maxWorkerCnt <= 0 {
+ maxWorkerCnt = runtime.NumCPU()
+ }
+
+ pool := &RoutinePool{
+ cacheCnt: cacheCnt,
+ wg: new(sync.WaitGroup),
+ lock: new(sync.Mutex),
+ shutDownWg: new(sync.WaitGroup),
+ autoTune: 0,
+ }
+ pool.isShutDown = 0
+ pool.maxWorkerCnt += int64(maxWorkerCnt)
+ if pool.cacheCnt <= 0 {
+ pool.taskQueue = make(chan Task)
+ } else {
+ pool.taskQueue = make(chan Task, pool.cacheCnt)
+ }
+ pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
+ // dispatchQueue must be unbuffered
+ pool.dispatchQueue = make(chan Task)
+ pool.dispatcher()
+
+ return pool
+}
+
+// EnableAutoTune sets the autoTune enabled
+func (pool *RoutinePool) EnableAutoTune() {
+ atomic.StoreInt32(&pool.autoTune, 1)
+}
+
+func (pool *RoutinePool) checkStatus(t Task) error {
+ if t == nil {
+ return ErrTaskInvalid
+ }
+
+ if atomic.LoadInt32(&pool.isShutDown) == 1 {
+ return ErrPoolShutDown
+ }
+ return nil
+}
+
+func (pool *RoutinePool) dispatcher() {
+ pool.shutDownWg.Add(1)
+ go func() {
+ for {
+ task, ok := <-pool.dispatchQueue
+ if !ok {
+ break
+ }
+
+ if task == closeQueue {
+ close(pool.taskQueue)
+ pool.shutDownWg.Done()
+ continue
+ }
+
+ if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
+ pool.addWorker()
+ }
+
+ pool.taskQueue <- task
+ }
+ }()
+}
+
+// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
+func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
+ if atomic.LoadInt32(&pool.autoTune) == 1 {
+ return pool.basicPool.AddMaxWorkerCnt(value)
+ }
+ return pool.GetMaxWorkerCnt()
+}
+
+func (pool *RoutinePool) addWorker() {
+ if atomic.LoadInt32(&pool.autoTune) == 1 {
+ pool.lock.Lock()
+ defer pool.lock.Unlock()
+ }
+ w := &worker{}
+ w.name = fmt.Sprintf("worker-%d", len(pool.workers))
+ w.taskQueue = pool.taskQueue
+ w.wg = pool.wg
+ pool.AddWorkerCnt(1)
+ w.pool = pool
+ pool.workers[w.name] = w
+ pool.wg.Add(1)
+ w.start()
+}
+
+func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
+ if atomic.LoadInt32(&pool.autoTune) == 0 {
+ return false
+ }
+
+ if w == nil {
+ return false
+ }
+
+ workerCnt := pool.GetWorkerCnt()
+ maxWorkerCnt := pool.GetMaxWorkerCnt()
+ if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
+ pool.lock.Lock()
+ defer pool.lock.Unlock()
+ delete(pool.workers, w.name)
+ w.wg.Done()
+ w.release()
+ return true
+ }
+
+ return false
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *RoutinePool) 
ExecuteFunc(f func() interface{}) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ pool.Execute(fw)
+}
+
+// Execute pushes the specified task to the dispatchQueue
+func (pool *RoutinePool) Execute(t Task) {
+ if t != nil {
+ pool.dispatchQueue <- t
+ }
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ return pool.Submit(fw)
+}
+
+// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
+func (pool *RoutinePool) Submit(t Task) (Future, error) {
+ if err := pool.checkStatus(t); err != nil {
+ return nil, err
+ }
+ f := &FutureResult{}
+ f.resultChan = make(chan interface{}, 1)
+ tw := &taskWrapper{
+ t: t,
+ f: f,
+ }
+ pool.dispatchQueue <- tw
+ return f, nil
+}
+
+// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
+// Also takes a timeout value in milliseconds; it returns ErrSubmitTimeout if the task doesn't get dispatched within that time.
+func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
+ if timeout <= 0 {
+ return pool.Submit(t)
+ }
+ if err := pool.checkStatus(t); err != nil {
+ return nil, err
+ }
+ timeoutChan := make(chan bool, 1)
+ go func() {
+ time.Sleep(time.Duration(time.Millisecond * time.Duration(timeout)))
+ timeoutChan <- true
+ close(timeoutChan)
+ }()
+
+ f := &FutureResult{}
+ f.resultChan = make(chan interface{}, 1)
+ tw := &taskWrapper{
+ t: t,
+ f: f,
+ }
+ select {
+ case pool.dispatchQueue <- tw:
+ return f, nil
+ case _, ok := <-timeoutChan:
+ if ok {
+ return nil, ErrSubmitTimeout
+ }
+ return nil, ErrSubmitTimeout
+ }
+}
+
+func (pool *RoutinePool) beforeCloseDispatchQueue() {
+ if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+ return
+ }
+ pool.dispatchQueue <- closeQueue
+ pool.wg.Wait()
+}
+
+func (pool *RoutinePool) doCloseDispatchQueue() {
+ close(pool.dispatchQueue)
+ pool.shutDownWg.Wait()
+}
+
+// ShutDown closes the RoutinePool instance
+func (pool *RoutinePool) ShutDown() {
+ pool.beforeCloseDispatchQueue()
+ pool.doCloseDispatchQueue()
+ for _, w := range pool.workers {
+ w.release()
+ }
+ pool.workers = nil
+ pool.taskQueue = nil
+ pool.dispatchQueue = nil
+}
+
+// NoChanPool defines the coroutine pool struct
+type NoChanPool struct {
+ basicPool
+ wg *sync.WaitGroup
+ tokens chan interface{}
+}
+
+// NewNochanPool creates a new NoChanPool instance
+func NewNochanPool(maxWorkerCnt int) Pool {
+ if maxWorkerCnt <= 0 {
+ maxWorkerCnt = runtime.NumCPU()
+ }
+
+ pool := &NoChanPool{
+ wg: new(sync.WaitGroup),
+ tokens: make(chan interface{}, maxWorkerCnt),
+ }
+ pool.isShutDown = 0
+ pool.AddMaxWorkerCnt(int64(maxWorkerCnt))
+
+ for i := 0; i < maxWorkerCnt; i++ {
+ pool.tokens <- struct{}{}
+ }
+
+ return pool
+}
+
+func (pool *NoChanPool) acquire() {
+ <-pool.tokens
+}
+
+func (pool *NoChanPool) release() {
+ pool.tokens <- struct{}{}
+}
+
+func (pool *NoChanPool) execute(t Task) {
+ pool.wg.Add(1)
+ go func() {
+ pool.acquire()
+ defer func() {
+ pool.release()
+ pool.wg.Done()
+ }()
+ runTask(t)
+ }()
+}
+
+// ShutDown closes the NoChanPool instance
+func (pool *NoChanPool) ShutDown() {
+ if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+ return
+ }
+ pool.wg.Wait()
+}
+
+// Execute executes the specified task
+func (pool *NoChanPool) Execute(t Task) {
+ if t != nil {
+ pool.execute(t)
+ }
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function 
and calls the Execute function +func (pool *NoChanPool) ExecuteFunc(f func() interface{}) { + fw := &funcWrapper{ + f: f, + } + pool.Execute(fw) +} + +// Submit executes the specified task, and returns the FutureResult and error info +func (pool *NoChanPool) Submit(t Task) (Future, error) { + if t == nil { + return nil, ErrTaskInvalid + } + + f := &FutureResult{} + f.resultChan = make(chan interface{}, 1) + tw := &taskWrapper{ + t: t, + f: f, + } + + pool.execute(tw) + return f, nil +} + +// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function +func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) { + fw := &funcWrapper{ + f: f, + } + return pool.Submit(fw) +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go new file mode 100644 index 0000000..2e485c1 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go @@ -0,0 +1,243 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "encoding/json" + "io/ioutil" + "math/rand" + "net" + "net/http" + "os" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + accessKeyEnv = "OBS_ACCESS_KEY_ID" + securityKeyEnv = "OBS_SECRET_ACCESS_KEY" + securityTokenEnv = "OBS_SECURITY_TOKEN" + ecsRequestURL = "http://169.254.169.254/openstack/latest/securitykey" +) + +type securityHolder struct { + ak string + sk string + securityToken string +} + +var emptySecurityHolder = securityHolder{} + +type securityProvider interface { + getSecurity() securityHolder +} + +type BasicSecurityProvider struct { + val atomic.Value +} + +func (bsp *BasicSecurityProvider) getSecurity() securityHolder { + if sh, ok := bsp.val.Load().(securityHolder); ok { + return sh + } + return emptySecurityHolder +} + +func (bsp *BasicSecurityProvider) refresh(ak, sk, securityToken string) { + bsp.val.Store(securityHolder{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)}) +} + +func NewBasicSecurityProvider(ak, sk, securityToken string) *BasicSecurityProvider { + bsp := &BasicSecurityProvider{} + bsp.refresh(ak, sk, securityToken) + return bsp +} + +type EnvSecurityProvider struct { + sh securityHolder + suffix string + once sync.Once +} + +func (esp *EnvSecurityProvider) getSecurity() securityHolder { + //ensure run only once + esp.once.Do(func() { + esp.sh = securityHolder{ + ak: strings.TrimSpace(os.Getenv(accessKeyEnv + esp.suffix)), + sk: strings.TrimSpace(os.Getenv(securityKeyEnv + esp.suffix)), + securityToken: strings.TrimSpace(os.Getenv(securityTokenEnv + esp.suffix)), + } + }) + + return esp.sh +} + +func NewEnvSecurityProvider(suffix string) *EnvSecurityProvider { + if suffix != "" { + suffix = "_" + suffix + } + esp := &EnvSecurityProvider{ + suffix: suffix, + } + return esp +} + +type TemporarySecurityHolder struct { + securityHolder + expireDate 
time.Time
+}
+
+var emptyTemporarySecurityHolder = TemporarySecurityHolder{}
+
+type EcsSecurityProvider struct {
+ val atomic.Value
+ lock sync.Mutex
+ httpClient *http.Client
+ prefetch int32
+ retryCount int
+}
+
+func (ecsSp *EcsSecurityProvider) loadTemporarySecurityHolder() (TemporarySecurityHolder, bool) {
+ if sh := ecsSp.val.Load(); sh == nil {
+ return emptyTemporarySecurityHolder, false
+ } else if _sh, ok := sh.(TemporarySecurityHolder); !ok {
+ return emptyTemporarySecurityHolder, false
+ } else {
+ return _sh, true
+ }
+}
+
+func (ecsSp *EcsSecurityProvider) getAndSetSecurityWithOutLock() securityHolder {
+ _sh := TemporarySecurityHolder{}
+ _sh.expireDate = time.Now().Add(time.Minute * 5)
+ retryCount := 0
+ for {
+ if req, err := http.NewRequest("GET", ecsRequestURL, nil); err == nil {
+ start := GetCurrentTimestamp()
+ res, err := ecsSp.httpClient.Do(req)
+ if err == nil {
+ if data, _err := ioutil.ReadAll(res.Body); _err == nil {
+ temp := &struct {
+ Credential struct {
+ AK string `json:"access,omitempty"`
+ SK string `json:"secret,omitempty"`
+ SecurityToken string `json:"securitytoken,omitempty"`
+ ExpireDate time.Time `json:"expires_at,omitempty"`
+ } `json:"credential"`
+ }{}
+
+ doLog(LEVEL_DEBUG, "Get the json data from ecs succeeded")
+
+ if jsonErr := json.Unmarshal(data, temp); jsonErr == nil {
+ _sh.ak = temp.Credential.AK
+ _sh.sk = temp.Credential.SK
+ _sh.securityToken = temp.Credential.SecurityToken
+ _sh.expireDate = temp.Credential.ExpireDate.Add(time.Minute * -1)
+
+ doLog(LEVEL_INFO, "Get security from ecs succeeded, AK:xxxx, SK:xxxx, SecurityToken:xxxx, ExpireDate %s", _sh.expireDate)
+
+ doLog(LEVEL_INFO, "Get security from ecs succeeded, cost %d ms", (GetCurrentTimestamp() - start))
+ break
+ } else {
+ err = jsonErr
+ }
+ } else {
+ err = _err
+ }
+ }
+
+ doLog(LEVEL_WARN, "Try to get security from ecs failed, cost %d ms, err %s", (GetCurrentTimestamp() - start), err.Error())
+ }
+
+ if retryCount >= ecsSp.retryCount {
+ doLog(LEVEL_WARN, "Try to get security from ecs failed and exceed the max retry count")
+ break
+ }
+ sleepTime := float64(retryCount+2) * rand.Float64()
+ if sleepTime > 10 {
+ sleepTime = 10
+ }
+ time.Sleep(time.Duration(sleepTime * float64(time.Second)))
+ retryCount++
+ }
+
+ ecsSp.val.Store(_sh)
+ return _sh.securityHolder
+}
+
+func (ecsSp *EcsSecurityProvider) getAndSetSecurity() securityHolder {
+ ecsSp.lock.Lock()
+ defer ecsSp.lock.Unlock()
+ tsh, succeed := ecsSp.loadTemporarySecurityHolder()
+ if !succeed || time.Now().After(tsh.expireDate) {
+ return ecsSp.getAndSetSecurityWithOutLock()
+ }
+ return tsh.securityHolder
+}
+
+func (ecsSp *EcsSecurityProvider) getSecurity() securityHolder {
+ if tsh, succeed := ecsSp.loadTemporarySecurityHolder(); succeed {
+ if time.Now().Before(tsh.expireDate) {
+ // not expired yet
+ if time.Now().Add(time.Minute*5).After(tsh.expireDate) && atomic.CompareAndSwapInt32(&ecsSp.prefetch, 0, 1) {
+ // do prefetch
+ sh := ecsSp.getAndSetSecurityWithOutLock()
+ atomic.CompareAndSwapInt32(&ecsSp.prefetch, 1, 0)
+ return sh
+ }
+ return tsh.securityHolder
+ }
+ return ecsSp.getAndSetSecurity()
+ }
+
+ return ecsSp.getAndSetSecurity()
+}
+
+func getInternalTransport() *http.Transport {
+
+ timeout := 10
+ transport := &http.Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ start := GetCurrentTimestamp()
+ conn, err := (&net.Dialer{
+ Timeout: time.Second * time.Duration(timeout),
+ Resolver: net.DefaultResolver,
+ }).Dial(network, addr)
+
+ if isInfoLogEnabled() {
+ doLog(LEVEL_INFO, "Do 
http dial cost %d ms", (GetCurrentTimestamp() - start)) + } + if err != nil { + return nil, err + } + return getConnDelegate(conn, timeout, timeout*10), nil + }, + MaxIdleConns: 10, + MaxIdleConnsPerHost: 10, + ResponseHeaderTimeout: time.Second * time.Duration(timeout), + IdleConnTimeout: time.Second * time.Duration(DEFAULT_IDLE_CONN_TIMEOUT), + DisableCompression: true, + } + + return transport +} + +func NewEcsSecurityProvider(retryCount int) *EcsSecurityProvider { + ecsSp := &EcsSecurityProvider{ + retryCount: retryCount, + } + ecsSp.httpClient = &http.Client{Transport: getInternalTransport(), CheckRedirect: checkRedirectFunc} + return ecsSp +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go new file mode 100644 index 0000000..bb0eb84 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go @@ -0,0 +1,53 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "errors" +) + +// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error +func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) { + if input == nil { + return nil, errors.New("CreateSignedUrlInput is nil") + } + + params := make(map[string]string, len(input.QueryParams)) + for key, value := range input.QueryParams { + params[key] = value + } + + if input.SubResource != "" { + params[string(input.SubResource)] = "" + } + + headers := make(map[string][]string, len(input.Headers)) + for key, value := range input.Headers { + headers[key] = []string{value} + } + + if input.Expires <= 0 { + input.Expires = 300 + } + + requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires)) + if err != nil { + return nil, err + } + + output = &CreateSignedUrlOutput{ + SignedUrl: requestURL, + ActualSignedRequestHeaders: headers, + } + return +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go new file mode 100644 index 0000000..d2ae13f --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go @@ -0,0 +1,116 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "errors" + "fmt" + "strings" + "time" +) + +func (obsClient ObsClient) isSecurityToken(params map[string]string, sh securityHolder) { + if sh.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + params[HEADER_STS_TOKEN_OBS] = sh.securityToken + } else { + params[HEADER_STS_TOKEN_AMZ] = sh.securityToken + } + } +} + +// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput, +// and returns the CreateBrowserBasedSignatureOutput and error +func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) { + if input == nil { + return nil, errors.New("CreateBrowserBasedSignatureInput is nil") + } + + params := make(map[string]string, len(input.FormParams)) + for key, value := range input.FormParams { + params[key] = value + } + + date := time.Now().UTC() + shortDate := date.Format(SHORT_DATE_FORMAT) + longDate := date.Format(LONG_DATE_FORMAT) + sh := obsClient.getSecurity() + + credential, _ := getCredential(sh.ak, obsClient.conf.region, shortDate) + + if input.Expires <= 0 { + input.Expires = 300 + } + + expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT) + if obsClient.conf.signature == SignatureV4 { + params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX + params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + params[PARAM_DATE_AMZ_CAMEL] = longDate + } + + obsClient.isSecurityToken(params, sh) + + matchAnyBucket := true + matchAnyKey := true + count := 5 + if bucket := strings.TrimSpace(input.Bucket); bucket != "" { + params["bucket"] = bucket + matchAnyBucket = false + count-- + } + + if key := strings.TrimSpace(input.Key); key != "" { + params["key"] = key + matchAnyKey = false + count-- + } + + originPolicySlice := make([]string, 0, len(params)+count) + originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration)) + originPolicySlice = append(originPolicySlice, "\"conditions\":[") + for key, value := range params { + if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" { + originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value)) + } + } + + if matchAnyBucket { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],") + } + + if matchAnyKey { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],") + } + + originPolicySlice = append(originPolicySlice, "]}") + + originPolicy := strings.Join(originPolicySlice, "") + policy := Base64Encode([]byte(originPolicy)) + var signature string + if obsClient.conf.signature == SignatureV4 { + signature = getSignature(policy, sh.sk, obsClient.conf.region, shortDate) + } else { + signature = Base64Encode(HmacSha1([]byte(sh.sk), []byte(policy))) + } + + output = &CreateBrowserBasedSignatureOutput{ + OriginPolicy: originPolicy, + Policy: policy, + Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL], + Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL], + Date: params[PARAM_DATE_AMZ_CAMEL], + Signature: signature, + } + return +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go new file mode 100644 index 0000000..be92782 --- /dev/null +++ 
b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go @@ -0,0 +1,758 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "errors" + "io" + "net/http" + "os" + "strings" +) + +// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers +func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) { + output = &ListBucketsOutput{} + err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) { + output = &ListObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, 
signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + if output.EncodingType == "url" { + err = decodeListObjectsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) { + output = &ListVersionsOutput{} + err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + if output.EncodingType == "url" { + err = decodeListVersionsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err) + output = nil + } + } + } + return +} + +// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a +// specified bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) { + output = &ListMultipartUploadsOutput{} + err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else if output.EncodingType == "url" { + err = decodeListMultipartUploadsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err) + output = nil + } + } + return +} + +// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) { + output = &GetBucketQuotaOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers +func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request 
headers +func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) { + output = &GetBucketMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetBucketMetadataOutput(output) + } + return +} + +// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, 
actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) { + output = &GetBucketPolicyOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false) + if err != nil { + output = nil + } + return +} + +// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output 
*BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) { + output = &GetBucketWebsiteConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) { + output = &GetBucketLoggingConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) { + output = &GetBucketLifecycleConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + 
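// A failed request leaves the output half-filled, so the helpers in this file drop it and return only err.
+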
output = nil + } + return +} + +// DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) { + output = &GetBucketTaggingOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) { + output = &GetBucketNotificationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) { + output = &DeleteObjectOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + 
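// On success, ParseDeleteObjectOutput lifts response headers (such as the version id and delete marker) into the typed output.
+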
ParseDeleteObjectOutput(output) + } + return +} + +// DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data +func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) { + output = &DeleteObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else if output.EncodingType == "url" { + err = decodeDeleteObjectsOutput(output) + if err != nil { + doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err) + output = nil + } + } + return +} + +// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) { + output = &GetObjectAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + } + return +} + +// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) { + output = &GetObjectMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectMetadataOutput(output) + } + return +} + +// GetObjectWithSignedUrl downloads object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) { + output = &GetObjectOutput{} + err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectOutput(output) + } + return +} + +// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, 
actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) { + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path +func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) { + var data io.Reader + sourceFile = strings.TrimSpace(sourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + var contentLength int64 + if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok { + contentLength = StringToInt64(value[0], -1) + } else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok { + contentLength = StringToInt64(value[0], -1) + } else { + contentLength = stat.Size() + } + if contentLength > stat.Size() { + return nil, errors.New("ContentLength is larger than fileSize") + } + fileReaderWrapper.totalCount = contentLength + data = fileReaderWrapper + } + + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers +func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) { + output = &CopyObjectOutput{} + err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers +func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers +func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) { + output = &InitiateMultipartUploadOutput{} + err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + if output.EncodingType == "url" 
{
+ err = decodeInitiateMultipartUploadOutput(output)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
+ output = nil
+ }
+ }
+ }
+ return
+}
+
+// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID
+// with the specified signed url and signed request headers and data
+func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
+ output = &UploadPartOutput{}
+ err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseUploadPartOutput(output)
+ }
+ return
+}
+
+// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
+// with the specified signed url and signed request headers and data
+func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
+ output = &CompleteMultipartUploadOutput{}
+ err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCompleteMultipartUploadOutput(output)
+ if output.EncodingType == "url" {
+ err = decodeCompleteMultipartUploadOutput(output)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
+ output = nil
+ }
+ }
+ }
+ return
+}
+
+// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
+ output = &ListPartsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else if output.EncodingType == "url" {
+ err = decodeListPartsOutput(output)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
+ output = nil
+ }
+ }
+ return
+}
+
+// CopyPartWithSignedUrl copies a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
+ output = &CopyPartOutput{}
+ err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyPartOutput(output)
+ }
+ return
+}
+
+// SetBucketRequestPaymentWithSignedUrl sets the requester-pays setting for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
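+
+// All the *WithSignedUrl helpers in this file share one calling pattern:
+// first build a signed URL (for example with ObsClient.CreateSignedUrl from
+// this SDK) and then replay it together with the headers that were actually
+// signed. A minimal sketch, assuming an already constructed client and a
+// hypothetical bucket and key:
+//
+//	signedInput := &CreateSignedUrlInput{Method: HttpMethodGet, Bucket: "examplebucket", Key: "examplekey", Expires: 300}
+//	signedOutput, err := client.CreateSignedUrl(signedInput)
+//	if err != nil {
+//		return err
+//	}
+//	getOutput, err := client.GetObjectWithSignedUrl(signedOutput.SignedUrl, signedOutput.ActualSignedRequestHeaders)
+//	if err != nil {
+//		return err
+//	}
+//	defer getOutput.Body.Close()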
+// GetBucketRequestPaymentWithSignedUrl gets the requester-pays setting of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
+ output = &GetBucketRequestPaymentOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketEncryptionWithSignedURL sets the encryption setting of a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketEncryption", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketEncryptionWithSignedURL gets the encryption setting of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *GetBucketEncryptionOutput, err error) {
+ output = &GetBucketEncryptionOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketEncryption", HTTP_GET, signedURL, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketEncryptionWithSignedURL deletes the encryption setting of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketEncryption", HTTP_DELETE, signedURL, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// AppendObjectWithSignedURL appends data to an object in the specified bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) AppendObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *AppendObjectOutput, err error) {
+ output = &AppendObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("AppendObject", HTTP_POST, signedURL, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if err = ParseAppendObjectOutput(output); err != nil {
+ output = nil
+ }
+ }
+ return
+}
+
+// ModifyObjectWithSignedURL modifies an object in the specified bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) ModifyObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *ModifyObjectOutput, err error) {
+ output = &ModifyObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("ModifyObject", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseModifyObjectOutput(output)
+ }
+ return
+}
diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait.go
new file mode 100644
index 0000000..203fd39
--- /dev/null
+++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait.go
@@ -0,0 +1,968 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:structcheck, unused +package obs + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os" + "strconv" + "strings" +) + +// IReadCloser defines interface with function: setReadCloser +type IReadCloser interface { + setReadCloser(body io.ReadCloser) +} + +func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) { + output.Body = body +} + +func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) { + if isObs { + header = HEADER_PREFIX_OBS + header + headers[header] = headerValue + } else { + header = HEADER_PREFIX + header + headers[header] = headerValue + } +} + +func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) { + if isObs { + headers[header] = headerValue + } else { + headers[headerNext] = headerValue + } +} + +// IBaseModel defines interface for base response model +type IBaseModel interface { + setStatusCode(statusCode int) + + setRequestID(requestID string) + + setResponseHeaders(responseHeaders map[string][]string) +} + +// ISerializable defines interface with function: trans +type ISerializable interface { + trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) +} + +// DefaultSerializable defines default serializable struct +type DefaultSerializable struct { + params map[string]string + headers map[string][]string + data interface{} +} + +func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) { + return input.params, input.headers, input.data, nil +} + +var defaultSerializable = &DefaultSerializable{} + +func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable { + return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil} +} + +func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(subResource): ""} + data, err = ConvertRequestToIoReader(input) + return +} + +func (baseModel *BaseModel) setStatusCode(statusCode int) { + baseModel.StatusCode = statusCode +} + +func (baseModel *BaseModel) setRequestID(requestID string) { + baseModel.RequestId = requestID +} + +func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) { + baseModel.ResponseHeaders = responseHeaders +} + +func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if input.QueryLocation && !isObs { + setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs) + } + if input.BucketType != "" { + setHeaders(headers, HEADER_BUCKET_TYPE, []string{string(input.BucketType)}, true) + } + return +} + +func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) { + if grantReadID := input.GrantReadId; grantReadID != "" { + 
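// Each non-empty grant ID below becomes a single grant header; setHeaders (defined above) picks the x-obs- or x-amz- prefix based on isObs.
+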
setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs)
+ }
+ if grantWriteID := input.GrantWriteId; grantWriteID != "" {
+ setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs)
+ }
+ if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs)
+ }
+ if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs)
+ }
+ if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" {
+ setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs)
+ }
+ if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
+ }
+ if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
+ setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
+ }
+}
+
+func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string)
+ if acl := string(input.ACL); acl != "" {
+ setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+ }
+ if storageClass := string(input.StorageClass); storageClass != "" {
+ if !isObs {
+ if storageClass == string(StorageClassWarm) {
+ storageClass = string(storageClassStandardIA)
+ } else if storageClass == string(StorageClassCold) {
+ storageClass = string(storageClassGlacier)
+ }
+ }
+ setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
+ }
+ if epid := input.Epid; epid != "" {
+ setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
+ }
+ if availableZone := input.AvailableZone; availableZone != "" {
+ setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
+ }
+
+ input.prepareGrantHeaders(headers, isObs)
+ if input.IsFSFileInterface {
+ setHeaders(headers, headerFSFileInterface, []string{"Enabled"}, true)
+ }
+
+ if location := strings.TrimSpace(input.Location); location != "" {
+ input.Location = location
+
+ xml := make([]string, 0, 3)
+ xml = append(xml, "<CreateBucketConfiguration>")
+ if isObs {
+ xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
+ } else {
+ xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
+ }
+ xml = append(xml, "</CreateBucketConfiguration>")
+
+ data = strings.Join(xml, "")
+ }
+ return
+}
+
+func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ xml := make([]string, 0, 1)
+ if !isObs {
+ storageClass := "STANDARD"
+ if input.StorageClass == StorageClassWarm {
+ storageClass = string(storageClassStandardIA)
+ } else if input.StorageClass == StorageClassCold {
+ storageClass = string(storageClassGlacier)
+ }
+ params = map[string]string{string(SubResourceStoragePolicy): ""}
+ xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
+ } else {
+ if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
+ input.StorageClass = StorageClassStandard
+ }
+ params = map[string]string{string(SubResourceStorageClass): ""}
+ xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
+ }
+ data = strings.Join(xml, "")
+ return
+}
+
+func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+
params = make(map[string]string) + if input.Prefix != "" { + params["prefix"] = input.Prefix + } + if input.Delimiter != "" { + params["delimiter"] = input.Delimiter + } + if input.MaxKeys > 0 { + params["max-keys"] = IntToString(input.MaxKeys) + } + if input.EncodingType != "" { + params["encoding-type"] = input.EncodingType + } + headers = make(map[string][]string) + if origin := strings.TrimSpace(input.Origin); origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{origin} + } + if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader} + } + return +} + +func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ListObjsInput.trans(isObs) + if err != nil { + return + } + if input.Marker != "" { + params["marker"] = input.Marker + } + return +} + +func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ListObjsInput.trans(isObs) + if err != nil { + return + } + params[string(SubResourceVersions)] = "" + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.VersionIdMarker != "" { + params["version-id-marker"] = input.VersionIdMarker + } + return +} + +func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceUploads): ""} + if input.Prefix != "" { + params["prefix"] = input.Prefix + } + if input.Delimiter != "" { + params["delimiter"] = input.Delimiter + } + if input.MaxUploads > 0 { + params["max-uploads"] = IntToString(input.MaxUploads) + } + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.UploadIdMarker != "" { + params["upload-id-marker"] = input.UploadIdMarker + } + if input.EncodingType != "" { + params["encoding-type"] = input.EncodingType + } + return +} + +func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceQuota, input) +} + +func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + headers = make(map[string][]string) + + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourcePolicy): ""} + data = strings.NewReader(input.Policy) + return +} + +func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceCors): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceVersioning, input) +} + +func 
(input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceWebsite): ""} + data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false) + return +} + +func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if origin := strings.TrimSpace(input.Origin); origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{origin} + } + if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader} + } + return +} + +func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceLogging): ""} + data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs) + return +} + +func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceLifecycle): ""} + data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs) + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketEncryptionInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceEncryption): ""} + data, _ = ConvertEncryptionConfigurationToXml(input.BucketEncryptionConfiguration, false, isObs) + return +} + +func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceTagging): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceNotification): ""} + data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs) + return +} + +func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceDelete): ""} + if strings.ToLower(input.EncodingType) == "url" { + for index, object := range input.Objects { + input.Objects[index].Key = url.QueryEscape(object.Key) + } + } + data, md5 := convertDeleteObjectsToXML(input) + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + if acl := string(input.ACL); acl 
!= "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceRestore): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + if !isObs { + data, err = ConvertRequestToIoReader(input) + } else { + data = ConverntObsRestoreToXml(input) + } + return +} + +// GetEncryption gets the Encryption field value from SseKmsHeader +func (header SseKmsHeader) GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + if !header.isObs { + return DEFAULT_SSE_KMS_ENCRYPTION + } + return DEFAULT_SSE_KMS_ENCRYPTION_OBS +} + +// GetKey gets the Key field value from SseKmsHeader +func (header SseKmsHeader) GetKey() string { + return header.Key +} + +// GetEncryption gets the Encryption field value from SseCHeader +func (header SseCHeader) GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + return DEFAULT_SSE_C_ENCRYPTION +} + +// GetKey gets the Key field value from SseCHeader +func (header SseCHeader) GetKey() string { + return header.Key +} + +// GetKeyMD5 gets the KeyMD5 field value from SseCHeader +func (header SseCHeader) GetKeyMD5() string { + if header.KeyMD5 != "" { + return header.KeyMD5 + } + + if ret, err := Base64Decode(header.GetKey()); err == nil { + return Base64Md5(ret) + } + return "" +} + +func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) { + if sseHeader != nil { + if sseCHeader, ok := sseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok { + sseKmsHeader.isObs = isObs + setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs) + if sseKmsHeader.GetKey() != "" { + setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs) + } + } + } +} + +func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if input.Origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin} + } + + if input.RequestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader} + } + setSseHeader(headers, input.SseHeader, true, isObs) + return +} + +func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) { + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING_CAMEL] = 
[]string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage} + } + + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } +} + +func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) { + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } +} + +func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + params = map[string]string{string(SubResourceMetadata): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs) + } else { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs) + } + if input.CacheControl != "" { + headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl} + } + input.prepareContentHeaders(headers) + if input.Expires != "" { + headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires} + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + } + input.prepareStorageClass(headers, isObs) + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input GetObjectInput) prepareResponseParams(params map[string]string) { + if input.ResponseCacheControl != "" { + params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl + } + if input.ResponseContentDisposition != "" { + params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition + } + if input.ResponseContentEncoding != "" { + params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding + } + if input.ResponseContentLanguage != "" { + params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage + } + if input.ResponseContentType != "" { + params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType + } + if input.ResponseExpires != "" { + params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires + } +} + +func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.GetObjectMetadataInput.trans(isObs) + if err != nil { + return + } + input.prepareResponseParams(params) + if input.ImageProcess != "" { + params[PARAM_IMAGE_PROCESS] = input.ImageProcess + } + if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart { + headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)} + } + + if input.IfMatch != "" { + headers[HEADER_IF_MATCH] = []string{input.IfMatch} + } + if input.IfNoneMatch != "" { + headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch} + } + if !input.IfModifiedSince.IsZero() { + 
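// HTTP conditional headers carry RFC 1123 timestamps, hence the FormatUtcToRfc1123 conversion below.
+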
headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)} + } + if !input.IfUnmodifiedSince.IsZero() { + headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)} + } + return +} + +func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) { + if GrantReadID := input.GrantReadId; GrantReadID != "" { + setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true) + } + if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" { + setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true) + } + if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true) + } + if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true) + } +} + +func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + params = make(map[string]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } + input.prepareGrantHeaders(headers) + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + + } + setSseHeader(headers, input.SseHeader, false, isObs) + if input.Expires != 0 { + setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true) + } + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + + if input.ContentLength > 0 { + headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + + return +} + +func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.PutObjectBasicInput.trans(isObs) + if err != nil { + return + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input AppendObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.PutObjectBasicInput.trans(isObs) + if err != nil { + return + } + params[string(SubResourceAppend)] = "" + params["position"] = strconv.FormatInt(input.Position, 10) + if input.Body != nil { + data = input.Body + } + return +} + +func (input ModifyObjectInput) trans(isObs bool) 
(params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + params = make(map[string]string) + params[string(SubResourceModify)] = "" + params["position"] = strconv.FormatInt(input.Position, 10) + if input.ContentLength > 0 { + headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)} + } + + if input.Body != nil { + data = input.Body + } + return +} + +func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) { + if input.CacheControl != "" { + headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl} + } + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE] = []string{input.ContentType} + } + if input.Expires != "" { + headers[HEADER_EXPIRES] = []string{input.Expires} + } +} + +func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) { + if input.CopySourceIfMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs) + } + if input.CopySourceIfNoneMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs) + } + if !input.CopySourceIfModifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs) + } + if !input.CopySourceIfUnmodifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs) + } +} + +func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs) + } + + if input.MetadataDirective == ReplaceMetadata { + input.prepareReplaceHeaders(headers) + } + + input.prepareCopySourceHeaders(headers, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + } + if input.SuccessActionRedirect != "" { + headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect} + } + return +} + +func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": 
input.UploadId} + return +} + +func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + params[string(SubResourceUploads)] = "" + if input.EncodingType != "" { + params["encoding-type"] = input.EncodingType + } + return +} + +func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)} + headers = make(map[string][]string) + setSseHeader(headers, input.SseHeader, true, isObs) + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + if input.EncodingType != "" { + params["encoding-type"] = input.EncodingType + } + data, _ = ConvertCompleteMultipartUploadInputToXml(input, false) + return +} + +func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + if input.MaxParts > 0 { + params["max-parts"] = IntToString(input.MaxParts) + } + if input.PartNumberMarker > 0 { + params["part-number-marker"] = IntToString(input.PartNumberMarker) + } + if input.EncodingType != "" { + params["encoding-type"] = input.EncodingType + } + return +} + +func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)} + headers = make(map[string][]string, 1) + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart { + setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs) + } + + setSseHeader(headers, input.SseHeader, true, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + + } + return +} + +func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input SetBucketRequestPaymentInput) trans(isObs bool) 
(params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceRequestPayment, input) +} + +type partSlice []Part + +func (parts partSlice) Len() int { + return len(parts) +} + +func (parts partSlice) Less(i, j int) bool { + return parts[i].PartNumber < parts[j].PartNumber +} + +func (parts partSlice) Swap(i, j int) { + parts[i], parts[j] = parts[j], parts[i] +} + +type readerWrapper struct { + reader io.Reader + mark int64 + totalCount int64 + readedCount int64 +} + +func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) { + if r, ok := rw.reader.(*strings.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*bytes.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*os.File); ok { + return r.Seek(offset, whence) + } + return offset, nil +} + +func (rw *readerWrapper) Read(p []byte) (n int, err error) { + if rw.totalCount == 0 { + return 0, io.EOF + } + if rw.totalCount > 0 { + n, err = rw.reader.Read(p) + readedOnce := int64(n) + remainCount := rw.totalCount - rw.readedCount + if remainCount > readedOnce { + rw.readedCount += readedOnce + return n, err + } + rw.readedCount += remainCount + return int(remainCount), io.EOF + } + return rw.reader.Read(p) +} + +type fileReaderWrapper struct { + readerWrapper + filePath string +} + +func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchPolicyToJSON(input) + return +} + +func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchJobToJSON(input) + return +} + +func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go new file mode 100644 index 0000000..ce29e6e --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go @@ -0,0 +1,874 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + "syscall" +) + +var errAbort = errors.New("AbortError") + +// FileStatus defines the upload file properties +type FileStatus struct { + XMLName xml.Name `xml:"FileInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` +} + +// UploadPartInfo defines the upload part properties +type UploadPartInfo struct { + XMLName xml.Name `xml:"UploadPart"` + PartNumber int `xml:"PartNumber"` + Etag string `xml:"Etag"` + PartSize int64 `xml:"PartSize"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// UploadCheckpoint defines the upload checkpoint file properties +type UploadCheckpoint struct { + XMLName xml.Name `xml:"UploadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId,omitempty"` + UploadFile string `xml:"FileUrl"` + FileInfo FileStatus `xml:"FileInfo"` + UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` +} + +func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool { + if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.") + return false + } + + if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.") + return false + } + + if ufc.UploadId == "" { + doLog(LEVEL_INFO, "UploadId is invalid. clear the record.") + return false + } + + return true +} + +type uploadPartTask struct { + UploadPartInput + obsClient *ObsClient + abort *int32 + extensions []extensionOptions + enableCheckpoint bool +} + +func (task *uploadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + + input := &UploadPartInput{} + input.Bucket = task.Bucket + input.Key = task.Key + input.PartNumber = task.PartNumber + input.UploadId = task.UploadId + input.SseHeader = task.SseHeader + input.SourceFile = task.SourceFile + input.Offset = task.Offset + input.PartSize = task.PartSize + extensions := task.extensions + + var output *UploadPartOutput + var err error + if extensions != nil { + output, err = task.obsClient.UploadPart(input, extensions...) 
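+		// Forward any per-request extension options to UploadPart; the
+		// branch below issues the identical request without them.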
+ } else { + output, err = task.obsClient.UploadPart(input) + } + + if err == nil { + if output.ETag == "" { + doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber) + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber) + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return err +} + +func loadCheckpointFile(checkpointFile string, result interface{}) error { + ret, err := ioutil.ReadFile(checkpointFile) + if err != nil { + return err + } + if len(ret) == 0 { + return nil + } + return xml.Unmarshal(ret, result) +} + +func updateCheckpointFile(fc interface{}, checkpointFilePath string) error { + result, err := xml.Marshal(fc) + if err != nil { + return err + } + err = ioutil.WriteFile(checkpointFilePath, result, 0666) + return err +} + +func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, ufc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) { + if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err)) + } + } else { + return false, nil + } + + return true, nil +} + +func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error { + initiateInput := &InitiateMultipartUploadInput{} + initiateInput.ObjectOperationInput = input.ObjectOperationInput + initiateInput.ContentType = input.ContentType + initiateInput.EncodingType = input.EncodingType + var output *InitiateMultipartUploadOutput + var err error + if extensions != nil { + output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...) 
+ } else { + output, err = obsClient.InitiateMultipartUpload(initiateInput) + } + if err != nil { + return err + } + + ufc.Bucket = input.Bucket + ufc.Key = input.Key + ufc.UploadFile = input.UploadFile + ufc.FileInfo = FileStatus{} + ufc.FileInfo.Size = uploadFileStat.Size() + ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix() + ufc.UploadId = output.UploadId + + err = sliceFile(input.PartSize, ufc) + return err +} + +func sliceFile(partSize int64, ufc *UploadCheckpoint) error { + fileSize := ufc.FileInfo.Size + cnt := fileSize / partSize + if cnt >= 10000 { + partSize = fileSize / 10000 + if fileSize%10000 != 0 { + partSize++ + } + cnt = fileSize / partSize + } + if fileSize%partSize != 0 { + cnt++ + } + + if partSize > MAX_PART_SIZE { + doLog(LEVEL_ERROR, "The source upload file is too large") + return fmt.Errorf("The source upload file is too large") + } + + if cnt == 0 { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = 1 + ufc.UploadParts = []UploadPartInfo{uploadPart} + } else { + uploadParts := make([]UploadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = int(i) + 1 + uploadPart.PartSize = partSize + uploadPart.Offset = i * partSize + uploadParts = append(uploadParts, uploadPart) + } + if value := fileSize % partSize; value != 0 { + uploadParts[cnt-1].PartSize = value + } + ufc.UploadParts = uploadParts + } + return nil +} + +func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error { + input := &AbortMultipartUploadInput{} + input.Bucket = bucket + input.Key = key + input.UploadId = uploadID + if extensions != nil { + _, err := obsClient.AbortMultipartUpload(input, extensions...) + return err + } + _, err := obsClient.AbortMultipartUpload(input) + return err +} + +func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error { + if uploadPartError != nil { + if enableCheckpoint { + return uploadPartError + } + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return uploadPartError + } + return nil +} + +func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, encodingType string, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + completeInput := &CompleteMultipartUploadInput{} + completeInput.Bucket = ufc.Bucket + completeInput.Key = ufc.Key + completeInput.UploadId = ufc.UploadId + completeInput.EncodingType = encodingType + parts := make([]Part, 0, len(ufc.UploadParts)) + for _, uploadPart := range ufc.UploadParts { + part := Part{} + part.PartNumber = uploadPart.PartNumber + part.ETag = uploadPart.Etag + parts = append(parts, part) + } + completeInput.Parts = parts + var completeOutput *CompleteMultipartUploadOutput + if extensions != nil { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...) 
+ } else { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput) + } + + if err == nil { + if enableCheckpoint { + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err) + } + } + return completeOutput, err + } + if !enableCheckpoint { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + } + return completeOutput, err +} + +func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + uploadFileStat, err := os.Stat(input.UploadFile) + if err != nil { + doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err)) + return nil, err + } + if uploadFileStat.IsDir() { + doLog(LEVEL_ERROR, "UploadFile can not be a folder.") + return nil, errors.New("uploadFile can not be a folder") + } + + ufc := &UploadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + } + if needCheckpoint { + err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + + if enableCheckpoint { + err = updateCheckpointFile(ufc, checkpointFilePath) + if err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err) + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return nil, err + } + } + } + + uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions) + err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions) + if err != nil { + return nil, err + } + + completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, input.EncodingType, extensions) + + return completeOutput, err +} + +func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) { + if uploadPartOutput, ok := result.(*UploadPartOutput); ok { + lock.Lock() + defer lock.Unlock() + ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag + ufc.UploadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(ufc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var uploadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, uploadPart := range ufc.UploadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if uploadPart.IsCompleted { + continue + } + task := uploadPartTask{ + UploadPartInput: UploadPartInput{ + Bucket: ufc.Bucket, + Key: ufc.Key, + PartNumber: uploadPart.PartNumber, + UploadId: ufc.UploadId, + 
SseHeader: input.SseHeader, + SourceFile: input.UploadFile, + Offset: uploadPart.Offset, + PartSize: uploadPart.PartSize, + }, + obsClient: &obsClient, + abort: &abort, + extensions: extensions, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + uploadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := uploadPartError.Load().(error); ok { + return err + } + return nil +} + +// ObjectInfo defines download object info +type ObjectInfo struct { + XMLName xml.Name `xml:"ObjectInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` + ETag string `xml:"ETag"` +} + +// TempFileInfo defines temp download file properties +type TempFileInfo struct { + XMLName xml.Name `xml:"TempFileInfo"` + TempFileUrl string `xml:"TempFileUrl"` + Size int64 `xml:"Size"` +} + +// DownloadPartInfo defines download part properties +type DownloadPartInfo struct { + XMLName xml.Name `xml:"DownloadPart"` + PartNumber int64 `xml:"PartNumber"` + RangeEnd int64 `xml:"RangeEnd"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// DownloadCheckpoint defines download checkpoint file properties +type DownloadCheckpoint struct { + XMLName xml.Name `xml:"DownloadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` + DownloadFile string `xml:"FileUrl"` + ObjectInfo ObjectInfo `xml:"ObjectInfo"` + TempFileInfo TempFileInfo `xml:"TempFileInfo"` + DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` +} + +func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool { + if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.") + return false + } + if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.") + return false + } + if dfc.TempFileInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.") + return false + } + stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl) + if err != nil || stat.Size() != dfc.ObjectInfo.Size { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. 
clear the record.") + return false + } + + return true +} + +type downloadPartTask struct { + GetObjectInput + obsClient *ObsClient + extensions []extensionOptions + abort *int32 + partNumber int64 + tempFileURL string + enableCheckpoint bool +} + +func (task *downloadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + getObjectInput := &GetObjectInput{} + getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput + getObjectInput.IfMatch = task.IfMatch + getObjectInput.IfNoneMatch = task.IfNoneMatch + getObjectInput.IfModifiedSince = task.IfModifiedSince + getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince + getObjectInput.RangeStart = task.RangeStart + getObjectInput.RangeEnd = task.RangeEnd + + var output *GetObjectOutput + var err error + if task.extensions != nil { + output, err = task.obsClient.GetObject(getObjectInput, task.extensions...) + } else { + output, err = task.obsClient.GetObject(getObjectInput) + } + + if err == nil { + defer func() { + errMsg := output.Body.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close response body.") + } + }() + _err := updateDownloadFile(task.tempFileURL, task.RangeStart, output) + if _err != nil { + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return _err + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return err +} + +func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) { + if extensions != nil { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...) 
+ } else { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput) + } + + return +} + +func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, dfc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !dfc.isValid(input, output) { + if dfc.TempFileInfo.TempFileUrl != "" { + _err := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err) + } + } else { + return false, nil + } + + return true, nil +} + +func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) { + cnt := objectSize / partSize + if objectSize%partSize > 0 { + cnt++ + } + + if cnt == 0 { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = 1 + dfc.DownloadParts = []DownloadPartInfo{downloadPart} + } else { + downloadParts := make([]DownloadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = i + 1 + downloadPart.Offset = i * partSize + downloadPart.RangeEnd = (i+1)*partSize - 1 + downloadParts = append(downloadParts, downloadPart) + } + dfc.DownloadParts = downloadParts + if value := objectSize % partSize; value > 0 { + dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1 + } + } +} + +func createFile(tempFileURL string, fileSize int64) error { + fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := syscall.Close(fd) + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + err = syscall.Ftruncate(fd, fileSize) + if err != nil { + doLog(LEVEL_WARN, "Failed to create file with error [%v].", err) + } + return err +} + +func prepareTempFile(tempFileURL string, fileSize int64) error { + parentDir := filepath.Dir(tempFileURL) + stat, err := os.Stat(parentDir) + if err != nil { + doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err) + _err := os.MkdirAll(parentDir, os.ModePerm) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err) + return _err + } + } else if !stat.IsDir() { + doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir) + return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir) + } + + err = createFile(tempFileURL, fileSize) + if err == nil { + return nil + } + fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file 
with error [%v].", errMsg) + } + }() + if fileSize > 0 { + _, err = fd.WriteAt([]byte("a"), fileSize-1) + if err != nil { + doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err) + return err + } + } + + return nil +} + +func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error { + if downloadFileError != nil { + if !enableCheckpoint { + _err := os.Remove(tempFileURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + return downloadFileError + } + return nil +} + +func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) { + getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions) + if err != nil { + return nil, err + } + + objectSize := getObjectmetaOutput.ContentLength + partSize := input.PartSize + dfc := &DownloadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput) + if err != nil { + return nil, err + } + } + + if needCheckpoint { + dfc.Bucket = input.Bucket + dfc.Key = input.Key + dfc.VersionId = input.VersionId + dfc.DownloadFile = input.DownloadFile + dfc.ObjectInfo = ObjectInfo{} + dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix() + dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength + dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag + dfc.TempFileInfo = TempFileInfo{} + dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp" + dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength + + sliceObject(objectSize, partSize, dfc) + _err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size) + if _err != nil { + return nil, _err + } + + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err) + _errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _errMsg != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg) + } + return nil, _err + } + } + } + + downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions) + err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError) + if err != nil { + return nil, err + } + + err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile) + if err != nil { + doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err) + return nil, err + } + if enableCheckpoint { + err = os.Remove(checkpointFilePath) + if err != nil { + doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err) + } + } + + return getObjectmetaOutput, nil +} + +func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error { + fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + _, err = fd.Seek(rangeStart, 0) + if err != nil { + doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err) + return err + } + 
fileWriter := bufio.NewWriterSize(fd, 65536) + part := make([]byte, 8192) + var readErr error + var readCount int + for { + readCount, readErr = output.Body.Read(part) + if readCount > 0 { + wcnt, werr := fileWriter.Write(part[0:readCount]) + if werr != nil { + doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr) + return werr + } + if wcnt != readCount { + doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + } + } + if readErr != nil { + if readErr != io.EOF { + doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr) + return readErr + } + break + } + } + err = fileWriter.Flush() + if err != nil { + doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err) + return err + } + return nil +} + +func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) { + if _, ok := result.(*GetObjectOutput); ok { + lock.Lock() + defer lock.Unlock() + dfc.DownloadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFile) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var downloadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, downloadPart := range dfc.DownloadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if downloadPart.IsCompleted { + continue + } + task := downloadPartTask{ + GetObjectInput: GetObjectInput{ + GetObjectMetadataInput: input.GetObjectMetadataInput, + IfMatch: input.IfMatch, + IfNoneMatch: input.IfNoneMatch, + IfUnmodifiedSince: input.IfUnmodifiedSince, + IfModifiedSince: input.IfModifiedSince, + RangeStart: downloadPart.Offset, + RangeEnd: downloadPart.RangeEnd, + }, + obsClient: &obsClient, + extensions: extensions, + abort: &abort, + partNumber: downloadPart.PartNumber, + tempFileURL: dfc.TempFileInfo.TempFileUrl, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + downloadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := downloadPartError.Load().(error); ok { + return err + } + + return nil +} diff --git a/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go new file mode 100644 index 0000000..6ee5382 --- /dev/null +++ b/vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go @@ -0,0 +1,551 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. 
You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/xml" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$") +var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$") +var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+") +var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+") + +// StringContains replaces subStr in src with subTranscoding and returns the new string +func StringContains(src string, subStr string, subTranscoding string) string { + return strings.Replace(src, subStr, subTranscoding, -1) +} + +// XmlTranscoding replaces special characters with their escaped form +func XmlTranscoding(src string) string { + srcTmp := StringContains(src, "&", "&amp;") + srcTmp = StringContains(srcTmp, "<", "&lt;") + srcTmp = StringContains(srcTmp, ">", "&gt;") + srcTmp = StringContains(srcTmp, "'", "&apos;") + srcTmp = StringContains(srcTmp, "\"", "&quot;") + return srcTmp +} + +// StringToInt converts string value to int value with default value +func StringToInt(value string, def int) int { + ret, err := strconv.Atoi(value) + if err != nil { + ret = def + } + return ret +} + +// StringToInt64 converts string value to int64 value with default value +func StringToInt64(value string, def int64) int64 { + ret, err := strconv.ParseInt(value, 10, 64) + if err != nil { + ret = def + } + return ret +} + +// IntToString converts int value to string value +func IntToString(value int) string { + return strconv.Itoa(value) +} + +// Int64ToString converts int64 value to string value +func Int64ToString(value int64) string { + return strconv.FormatInt(value, 10) +} + +// GetCurrentTimestamp gets unix time in milliseconds +func GetCurrentTimestamp() int64 { + return time.Now().UnixNano() / 1000000 +} + +// FormatUtcNow gets a textual representation of the UTC format time value +func FormatUtcNow(format string) string { + return time.Now().UTC().Format(format) +} + +// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value +func FormatUtcToRfc1123(t time.Time) string { + ret := t.UTC().Format(time.RFC1123) + return ret[:strings.LastIndex(ret, "UTC")] + "GMT" +} + +// Md5 gets the md5 value of input +func Md5(value []byte) []byte { + m := md5.New() + _, err := m.Write(value) + if err != nil { + doLog(LEVEL_WARN, "MD5 failed to write") + } + return m.Sum(nil) +} + +// HmacSha1 gets hmac sha1 value of input +func HmacSha1(key, value []byte) []byte { + mac := hmac.New(sha1.New, key) + _, err := mac.Write(value) + if err != nil { + doLog(LEVEL_WARN, "HmacSha1 failed to write") + } + return mac.Sum(nil) +} + +// HmacSha256 gets hmac sha256 value of input +func HmacSha256(key, value []byte) []byte { + mac := hmac.New(sha256.New, key) + _, err := mac.Write(value) + if err != nil { + doLog(LEVEL_WARN, "HmacSha256 failed to write") + } + return mac.Sum(nil) +} + +// Base64Encode wrapper of base64.StdEncoding.EncodeToString +func
Base64Encode(value []byte) string { + return base64.StdEncoding.EncodeToString(value) +} + +// Base64Decode wrapper of base64.StdEncoding.DecodeString +func Base64Decode(value string) ([]byte, error) { + return base64.StdEncoding.DecodeString(value) +} + +// HexMd5 returns the md5 value of input in hexadecimal format +func HexMd5(value []byte) string { + return Hex(Md5(value)) +} + +// Base64Md5 returns the md5 value of input with Base64Encode +func Base64Md5(value []byte) string { + return Base64Encode(Md5(value)) +} + +// Sha256Hash returns sha256 checksum +func Sha256Hash(value []byte) []byte { + hash := sha256.New() + _, err := hash.Write(value) + if err != nil { + doLog(LEVEL_WARN, "Sha256Hash failed to write") + } + return hash.Sum(nil) +} + +// ParseXml wrapper of xml.Unmarshal +func ParseXml(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return xml.Unmarshal(value, result) +} + +// parseJSON wrapper of json.Unmarshal +func parseJSON(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return json.Unmarshal(value, result) +} + +// TransToXml wrapper of xml.Marshal +func TransToXml(value interface{}) ([]byte, error) { + if value == nil { + return []byte{}, nil + } + return xml.Marshal(value) +} + +// Hex wrapper of hex.EncodeToString +func Hex(value []byte) string { + return hex.EncodeToString(value) +} + +// HexSha256 returns the Sha256Hash value of input in hexadecimal format +func HexSha256(value []byte) string { + return Hex(Sha256Hash(value)) +} + +// UrlDecode wrapper of url.QueryUnescape +func UrlDecode(value string) (string, error) { + ret, err := url.QueryUnescape(value) + if err == nil { + return ret, nil + } + return "", err +} + +// UrlDecodeWithoutError wrapper of UrlDecode +func UrlDecodeWithoutError(value string) string { + ret, err := UrlDecode(value) + if err == nil { + return ret + } + if isErrorLogEnabled() { + doLog(LEVEL_ERROR, "Url decode error") + } + return "" +} + +// IsIP checks whether the value matches ip address +func IsIP(value string) bool { + return ipRegex.MatchString(value) +} + +// UrlEncode encodes the input value +func UrlEncode(value string, chineseOnly bool) string { + if chineseOnly { + values := make([]string, 0, len(value)) + for _, val := range value { + _value := string(val) + if regex.MatchString(_value) { + _value = url.QueryEscape(_value) + } + values = append(values, _value) + } + return strings.Join(values, "") + } + return url.QueryEscape(value) +} + +func copyHeaders(m map[string][]string) (ret map[string][]string) { + if m != nil { + ret = make(map[string][]string, len(m)) + for key, values := range m { + _values := make([]string, 0, len(values)) + for _, value := range values { + _values = append(_values, value) + } + ret[strings.ToLower(key)] = _values + } + } else { + ret = make(map[string][]string) + } + + return +} + +func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) { + signature = "v2" + if receviedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receviedAuthorization) > 0 { + if strings.HasPrefix(receviedAuthorization[0], V4_HASH_PREFIX) { + signature = "v4" + matches := v4AuthRegex.FindStringSubmatch(receviedAuthorization[0]) + if len(matches) >= 3 { + region = matches[1] + regions := regionRegex.FindStringSubmatch(region) + if len(regions) >= 2 { + region = regions[1] + } + signedHeaders = matches[2] + } + + } else if strings.HasPrefix(receviedAuthorization[0], V2_HASH_PREFIX) 
{ + signature = "v2" + } + } + return +} + +func getTemporaryKeys() []string { + return []string{ + "Signature", + "signature", + "X-Amz-Signature", + "x-amz-signature", + } +} + +func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool { + isObs := true + if isTemporary { + for _, value := range querys { + keyPrefix := strings.ToLower(value) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + } else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) { + isObs = false + } + } + } else { + for key := range headers { + keyPrefix := strings.ToLower(key) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + break + } + } + } + return isObs +} + +func isPathStyle(headers map[string][]string, bucketName string) bool { + if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + return true + } + return false +} + +// GetV2Authorization v2 Authorization +func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + params := make(map[string]string) + + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + headers = copyHeaders(headers) + pathStyle := isPathStyle(headers, bucketName) + conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + conf.signature = SignatureObs + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true) + v2HashPrefix := OBS_HASH_PREFIX + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + return +} + +func getQuerysResult(querys []string) []string { + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + return querysResult +} + +func getParams(querysResult []string) map[string]string { + params := make(map[string]string) + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + return params +} + +func getTemporaryAndSignature(params map[string]string) (bool, string) { + isTemporary := false + signature := "v2" + temporaryKeys := getTemporaryKeys() + for _, key := range temporaryKeys { + if _, ok := params[key]; ok { + isTemporary = true + if strings.ToLower(key) == "signature" { + signature = 
"v2" + } else if strings.ToLower(key) == "x-amz-signature" { + signature = "v4" + } + break + } + } + return isTemporary, signature +} + +// GetAuthorization Authorization +func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := getQuerysResult(querys) + params := getParams(querysResult) + + isTemporary, signature := getTemporaryAndSignature(params) + + isObs := getIsObs(isTemporary, querysResult, headers) + headers = copyHeaders(headers) + pathStyle := false + if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + pathStyle = true + } + conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + + if isTemporary { + return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs) + } + signature, region, signedHeaders := parseHeaders(headers) + if signature == "v4" { + conf.signature = SignatureV4 + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to parse requestURL") + return nil + } + headerKeys := strings.Split(signedHeaders, ";") + _headers := make(map[string][]string, len(headerKeys)) + for _, headerKey := range headerKeys { + _headers[headerKey] = headers[headerKey] + } + ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers) + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + v2HashPrefix := V2_HASH_PREFIX + if isObs { + v2HashPrefix = OBS_HASH_PREFIX + } + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + } + return + +} + +func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string, + headers map[string][]string, isObs bool) (ret map[string]string) { + + if signature == "v4" { + conf.signature = SignatureV4 + + longDate, ok := params[PARAM_DATE_AMZ_CAMEL] + if !ok { + longDate = params[HEADER_DATE_AMZ] + } + shortDate := longDate[:8] + + credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL] + if !ok { + credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)] + } + + _credential := UrlDecodeWithoutError(credential) + + regions := regionRegex.FindStringSubmatch(_credential) + var region string + if len(regions) >= 2 { + region = regions[1] + } + + _, scope := getCredential(ak, region, shortDate) + + expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL] + if !ok { + expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)] + } + + signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] + if !ok { + signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)] + } + + algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL] + if !ok { + 
algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)] + } + + if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok { + delete(params, PARAM_SIGNATURE_AMZ_CAMEL) + } else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok { + delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)) + } + + ret = make(map[string]string, 6) + ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm + ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + ret[PARAM_DATE_AMZ_CAMEL] = longDate + ret[PARAM_EXPIRES_AMZ_CAMEL] = expires + ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders + + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to parse requestUrl") + return nil + } + stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers) + ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + expires, ok := params["Expires"] + if !ok { + expires = params["expires"] + } + headers[HEADER_DATE_CAMEL] = []string{expires} + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs) + ret = make(map[string]string, 3) + ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false) + ret["AWSAccessKeyId"] = UrlEncode(ak, false) + ret["Expires"] = UrlEncode(expires, false) + } + + return +} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 0000000..5091fb0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 0000000..c56f37c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,28 @@ +language: go + +sudo: false + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - tip + +allow_failures: + - go: tip + +script: make build + +matrix: + include: + - language: go + go: 1.15.x + script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000..b03310a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
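A minimal sketch of how the `GetAuthorization` helper defined in `obs/util.go` above might be invoked to re-sign a request; the access keys, bucket, object key, and host header below are hypothetical placeholders, and only the function's signature is taken from the vendored source:

```go
package main

import (
	"fmt"

	"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
)

func main() {
	// Hypothetical request shape, for illustration only.
	headers := map[string][]string{
		"host": {"example-bucket.obs.example.com"},
	}
	// Returns the signed values (for example the Authorization header)
	// computed for the given method, bucket, object key, and query string.
	auth := obs.GetAuthorization("example-ak", "example-sk", "GET",
		"example-bucket", "example-object", "", headers)
	for k, v := range auth {
		fmt.Printf("%s: %s\n", k, v)
	}
}
```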
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 0000000..fb38ec2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,51 @@ + +CMD = jpgo + +SRC_PKGS=./ ./cmd/... ./fuzz/... + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ${SRC_PKGS} + +build: + rm -f $(CMD) + go build ${SRC_PKGS} + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: test-internal-testify + echo "making tests ${SRC_PKGS}" + go test -v ${SRC_PKGS} + +check: + go vet ${SRC_PKGS} + @echo "golint ${SRC_PKGS}" + @lint=`golint ${SRC_PKGS}`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out + +test-internal-testify: + cd internal/testify && go test ./... + diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 0000000..110ad79 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,87 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +go-jmespath is a Go implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. There's a single function +you use, `jmespath.Search`: + + +```go +> import "github.com/jmespath/go-jmespath" +> +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar.baz[2]", data) +result = 2 +``` + +In the example we gave the ``Search`` function input data of +`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath +expression `foo.bar.baz[2]`, and the `Search` function evaluated +the expression against the input data to produce the result ``2``. + +The JMESPath language can do a lot more than select an element +from a list.
Here are a few more examples: + +```go +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar", data) +result = { "baz": [ 0, 1, 2, 3, 4 ] } + + +> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, + {"first": "c", "last": "d"}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[*].first", data) +result = [ 'a', 'c' ] + + +> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, + {"age": 30}, {"age": 35}, + {"age": 40}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo[?age > `30`]", data) +result = [ { age: 35 }, { age: 40 } ] +``` + +You can also pre-compile your query. This is useful if +you are going to run multiple searches with it: + +```go + > var jsondata = []byte(`{"foo": "bar"}`) + > var data interface{} + > err := json.Unmarshal(jsondata, &data) + > precompiled, err := jmespath.Compile("foo") + > if err != nil { + > // ... handle the error + > } + > result, err := precompiled.Search(data) + result = "bar" +``` + +## More Resources + +The examples above only show a small amount of what +a JMESPath expression can do. If you want to take a +tour of the language, the *best* place to go is the +[JMESPath Tutorial](http://jmespath.org/tutorial.html). + +One of the best things about JMESPath is that it is +implemented in many different programming languages including +python, ruby, php, lua, etc. To see a complete list of libraries, +check out the [JMESPath libraries page](http://jmespath.org/libraries.html). + +And finally, the full JMESPath specification can be found +on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000..010efe9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the representation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000..1cd2d23 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000..9b7cd89 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
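+		// The dummy ordering result lets sort.Sort run to completion;
+		// callers such as sort_by check hasError afterwards and surface a
+		// comparison error instead of a bogus ordering.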
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "map", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { +
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
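+ // The argument spec only admits jpArrayNumber or jpArrayString, so if
+ // toArrayNum failed above, toArrayStr is guaranteed to succeed and its
+ // ok result can be dropped.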
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current <
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string.
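+ // (In Go a []interface{} never type-asserts to []string, even when
+ // every element happens to be a string.)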
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000..13c7460 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
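+// A minimal usage sketch (illustrative only, using the Parser defined in
+// this package):
+//
+//	ast, _ := NewParser().Parse("foo.bar")
+//	result, _ := newInterpreter().Execute(ast, map[string]interface{}{
+//		"foo": map[string]interface{}{"bar": 42},
+//	})
+//	// result is now 42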
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
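+ // (This covers user-provided slice types, e.g. []string, that fail
+ // the []interface{} type assertion above.)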
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, 0, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected,
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000..817900c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune; back() uses it to unread one rune. + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token.
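+ // The token is one rune wide, so its position is the position of
+ // that rune (currentPos minus the width just consumed).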
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
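+ // (next() zeroes lastWidth only when it returns eof, which is what
+ // is being checked here.)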
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can be reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tOr, "|" -> tPipe. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There are three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare lbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number.
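+ // A leading '-' (if any) was already consumed by the caller; it is
+ // included in the token via lastWidth below.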
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000..4abc303 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + // Close the children block so the printed braces balance. + output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent)) + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression.
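+// A short illustrative example:
+//
+//	ast, err := NewParser().Parse("foo[0].bar")
+//	if err != nil {
+//		// err is a SyntaxError describing where lexing/parsing failed
+//	}
+//	fmt.Println(ast.PrettyPrint(0))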
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType:
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, err + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Unexpected token " + current.String() + " in projection") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000..dae79cb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000..ddc1b7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// isFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string, array, or hash. +// - The boolean value false. +// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false.
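+ // (These handle user-provided Go values such as structs, typed slices
+ // and maps, and pointers.)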
+ rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// objsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// sliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// Python slices. +type sliceParam struct { + N int + Specified bool +} + +// slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + stepValueNegative := step < 0 + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// toArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// toArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect?
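+ // (As with toArrayNum, only a plain []interface{} is handled; typed
+ // slices such as []string would need reflection.)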
+ if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/LICENSE.txt b/vendor/github.com/ks3sdklib/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/NOTICE.txt b/vendor/github.com/ks3sdklib/aws-sdk-go/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..cf54d89
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,88 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with a code, a message, and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out the full error message, including the original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get the original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on the original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a request ID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed:", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..a0d7986
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,96 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different types, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
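+//
+// Editor's note (added for clarity): when root is true, rcopy writes into
+// dst as passed by the caller; for nested values it first allocates a fresh
+// zero value. An illustrative sketch of the exported Copy, using hypothetical
+// struct types:
+//
+// type Src struct{ Name *string }
+// type Dst struct{ Name *string }
+// n := "example"
+// var d Dst
+// awsutil.Copy(&d, &Src{Name: &n}) // d.Name is a newly allocated copy of Name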
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + if !root { + dst.Set(reflect.New(src.Type()).Elem()) + } + + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000..7ae01ef --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,175 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, create, caseSensitive) + if vals != nil && len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if create && value.Kind() == reflect.Ptr && value.IsNil() { + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if create { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of objects at the lexical path inside of a structure +func ValuesAtPath(i interface{}, path string) []interface{} { + if rvals := rValuesAtPath(i, path, false, true); rvals != nil { + vals := make([]interface{}, len(rvals)) + for i, rval := range rvals { + vals[i] = rval.Interface() + } + return vals + } + return nil +} + +// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical +// path inside of a structure +func ValuesAtAnyPath(i interface{}, path string) []interface{} { + if rvals := rValuesAtPath(i, path, false, false); rvals != nil { + vals := make([]interface{}, len(rvals)) + for i, rval := range rvals { + vals[i] = rval.Interface() + } + return vals + } + return nil +} + +// SetValueAtPath sets an object at the lexical path inside of a structure +func SetValueAtPath(i interface{}, path string, v 
interface{}) { + if rvals := rValuesAtPath(i, path, true, true); rvals != nil { + for _, rval := range rvals { + rval.Set(reflect.ValueOf(v)) + } + } +} + +// SetValueAtAnyPath sets an object at the case insensitive lexical path inside +// of a structure +func SetValueAtAnyPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false); rvals != nil { + for _, rval := range rvals { + rval.Set(reflect.ValueOf(v)) + } + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000..09673a1 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// stringValue will recursively walk value v to build a textual +// representation of the value. +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/util.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/util.go new file mode 100644 index 0000000..8ca8715 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/awsutil/util.go @@ -0,0 +1,133 @@ +package awsutil + +import ( + "bytes" + "encoding/xml" + "fmt" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "go/format" + "io" + "os" + "path/filepath" + "reflect" + 
"regexp" + "strings" +) + +// GoFmt returns the Go formated string of the input. +// +// Panics if the format fails. +func GoFmt(buf string) string { + formatted, err := format.Source([]byte(buf)) + if err != nil { + panic(fmt.Errorf("%s\nOriginal code:\n%s", err.Error(), buf)) + } + return string(formatted) +} + +var reTrim = regexp.MustCompile(`\s{2,}`) + +// Trim removes all leading and trailing white space. +// +// All consecutive spaces will be reduced to a single space. +func Trim(s string) string { + return strings.TrimSpace(reTrim.ReplaceAllString(s, " ")) +} + +// Capitalize capitalizes the first character of the string. +func Capitalize(s string) string { + if len(s) == 1 { + return strings.ToUpper(s) + } + return strings.ToUpper(s[0:1]) + s[1:] +} + +// SortXML sorts the reader's XML elements +func SortXML(r io.Reader) string { + var buf bytes.Buffer + d := xml.NewDecoder(r) + root, _ := xmlutil.XMLToStruct(d, nil) + e := xml.NewEncoder(&buf) + xmlutil.StructToXML(e, root, true) + return buf.String() +} + +// PrettyPrint generates a human readable representation of the value v. +// All values of v are recursively found and pretty printed also. +func PrettyPrint(v interface{}) string { + value := reflect.ValueOf(v) + switch value.Kind() { + case reflect.Struct: + str := fullName(value.Type()) + "{\n" + for i := 0; i < value.NumField(); i++ { + l := string(value.Type().Field(i).Name[0]) + if strings.ToUpper(l) == l { + str += value.Type().Field(i).Name + ": " + str += PrettyPrint(value.Field(i).Interface()) + str += ",\n" + } + } + str += "}" + return str + case reflect.Map: + str := "map[" + fullName(value.Type().Key()) + "]" + fullName(value.Type().Elem()) + "{\n" + for _, k := range value.MapKeys() { + str += "\"" + k.String() + "\": " + str += PrettyPrint(value.MapIndex(k).Interface()) + str += ",\n" + } + str += "}" + return str + case reflect.Ptr: + if e := value.Elem(); e.IsValid() { + return "&" + PrettyPrint(e.Interface()) + } + return "nil" + case reflect.Slice: + str := "[]" + fullName(value.Type().Elem()) + "{\n" + for i := 0; i < value.Len(); i++ { + str += PrettyPrint(value.Index(i).Interface()) + str += ",\n" + } + str += "}" + return str + default: + return fmt.Sprintf("%#v", v) + } +} + +func pkgName(t reflect.Type) string { + pkg := t.PkgPath() + c := strings.Split(pkg, "/") + return c[len(c)-1] +} + +func fullName(t reflect.Type) string { + if pkg := pkgName(t); pkg != "" { + return pkg + "." + t.Name() + } + return t.Name() +} + +//获取指定目录及所有子目录下的所有文件,可以匹配后缀过滤。 +func WalkDir(dirPth, suffix string) (files []string, err error) { + files = make([]string, 0, 30) + suffix = strings.ToUpper(suffix) //忽略后缀匹配的大小写 + + err = filepath.Walk(dirPth, func(filename string, fi os.FileInfo, err error) error { //遍历目录 + //if err != nil { //忽略错误 + // return err + //} + + if fi.IsDir() { // 忽略目录 + return nil + } + + if strings.HasSuffix(strings.ToUpper(fi.Name()), suffix) { + files = append(files, filename) + } + + return nil + }) + return files, err +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/config.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/config.go new file mode 100644 index 0000000..5f673e6 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/config.go @@ -0,0 +1,173 @@ +package aws + +import ( + "io" + "net/http" + "os" + "time" + + "github.com/ks3sdklib/aws-sdk-go/aws/credentials" +) + +// DefaultChainCredentials is a Credentials which will find the first available +// credentials Value from the list of Providers. 
+// +// This should be used in the default case. Once the type of credentials are +// known switching to the specific Credentials will be more efficient. +var DefaultChainCredentials = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute}, + }) + +// The default number of retries for a service. The value of -1 indicates that +// the service specific retry default will be used. +const DefaultRetries = -1 + +// DefaultConfig is the default all service configuration will be based off of. +var DefaultConfig = &Config{ + Credentials: DefaultChainCredentials, + Endpoint: "", + Region: os.Getenv("AWS_REGION"), + DisableSSL: false, + ManualSend: false, + HTTPClient: http.DefaultClient, + LogHTTPBody: false, + LogLevel: 0, + Logger: os.Stdout, + MaxRetries: DefaultRetries, + DisableParamValidation: false, + DisableComputeChecksums: false, + S3ForcePathStyle: false, +} + +// A Config provides service configuration +type Config struct { + Credentials *credentials.Credentials + Endpoint string + Region string + DisableSSL bool + ManualSend bool + HTTPClient *http.Client + LogHTTPBody bool + LogLevel uint + Logger io.Writer + MaxRetries int + DisableParamValidation bool + DisableComputeChecksums bool + S3ForcePathStyle bool +} + +// Copy will return a shallow copy of the Config object. +func (c Config) Copy() Config { + dst := Config{} + dst.Credentials = c.Credentials + dst.Endpoint = c.Endpoint + dst.Region = c.Region + dst.DisableSSL = c.DisableSSL + dst.ManualSend = c.ManualSend + dst.HTTPClient = c.HTTPClient + dst.LogHTTPBody = c.LogHTTPBody + dst.LogLevel = c.LogLevel + dst.Logger = c.Logger + dst.MaxRetries = c.MaxRetries + dst.DisableParamValidation = c.DisableParamValidation + dst.DisableComputeChecksums = c.DisableComputeChecksums + dst.S3ForcePathStyle = c.S3ForcePathStyle + + return dst +} + +// Merge merges the newcfg attribute values into this Config. Each attribute +// will be merged into this config if the newcfg attribute's value is non-zero. +// Due to this, newcfg attributes with zero values cannot be merged in. For +// example bool attributes cannot be cleared using Merge, and must be explicitly +// set on the Config structure. 
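+//
+// Editor's illustrative sketch (assumed values, not from the original source):
+//
+// base := DefaultConfig.Copy()
+// merged := base.Merge(&Config{Region: "BEIJING", MaxRetries: 3})
+// // merged.Region == "BEIJING"; fields left at their zero value in the
+// // argument keep base's values.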
+func (c Config) Merge(newcfg *Config) *Config {
+ if newcfg == nil {
+ return &c
+ }
+
+ cfg := Config{}
+
+ if newcfg.Credentials != nil {
+ cfg.Credentials = newcfg.Credentials
+ } else {
+ cfg.Credentials = c.Credentials
+ }
+
+ if newcfg.Endpoint != "" {
+ cfg.Endpoint = newcfg.Endpoint
+ } else {
+ cfg.Endpoint = c.Endpoint
+ }
+
+ if newcfg.Region != "" {
+ cfg.Region = newcfg.Region
+ } else {
+ cfg.Region = c.Region
+ }
+
+ if newcfg.DisableSSL {
+ cfg.DisableSSL = newcfg.DisableSSL
+ } else {
+ cfg.DisableSSL = c.DisableSSL
+ }
+
+ if newcfg.ManualSend {
+ cfg.ManualSend = newcfg.ManualSend
+ } else {
+ cfg.ManualSend = c.ManualSend
+ }
+
+ if newcfg.HTTPClient != nil {
+ cfg.HTTPClient = newcfg.HTTPClient
+ } else {
+ cfg.HTTPClient = c.HTTPClient
+ }
+
+ if newcfg.LogHTTPBody {
+ cfg.LogHTTPBody = newcfg.LogHTTPBody
+ } else {
+ cfg.LogHTTPBody = c.LogHTTPBody
+ }
+
+ if newcfg.LogLevel != 0 {
+ cfg.LogLevel = newcfg.LogLevel
+ } else {
+ cfg.LogLevel = c.LogLevel
+ }
+
+ if newcfg.Logger != nil {
+ cfg.Logger = newcfg.Logger
+ } else {
+ cfg.Logger = c.Logger
+ }
+
+ if newcfg.MaxRetries != DefaultRetries {
+ cfg.MaxRetries = newcfg.MaxRetries
+ } else {
+ cfg.MaxRetries = c.MaxRetries
+ }
+
+ if newcfg.DisableParamValidation {
+ cfg.DisableParamValidation = newcfg.DisableParamValidation
+ } else {
+ cfg.DisableParamValidation = c.DisableParamValidation
+ }
+
+ if newcfg.DisableComputeChecksums {
+ cfg.DisableComputeChecksums = newcfg.DisableComputeChecksums
+ } else {
+ cfg.DisableComputeChecksums = c.DisableComputeChecksums
+ }
+
+ if newcfg.S3ForcePathStyle {
+ cfg.S3ForcePathStyle = newcfg.S3ForcePathStyle
+ } else {
+ cfg.S3ForcePathStyle = c.S3ForcePathStyle
+ }
+
+ return &cfg
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..679719a
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,81 @@
+package credentials
+
+import (
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ ErrNoValidProvidersFoundInChain = apierr.New("NoCredentialProviders", "no valid providers in chain", nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none, ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+// creds := NewChainCredentials(
+// []Provider{
+// &EnvProvider{},
+// &EC2RoleProvider{},
+// })
+// creds.Retrieve()
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// one without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ for _, p := range c.Providers {
+ if creds, err := p.Retrieve(); err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ }
+ c.curr = nil
+
+ // TODO better error reporting. maybe report error for each failed retrieve?
+
+ return Value{}, ErrNoValidProvidersFoundInChain
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..6f6ec03
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,178 @@
+// Package credentials provides credential retrieval and management.
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when a credentials Value has expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewCredentials(&EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as a
+// dummy placeholder for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+// // Access public S3 buckets.
+//
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// A Credentials provides synchronized, safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+ m sync.Mutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or an error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be forcibly
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
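+//
+// Editor's illustrative sketch (assumed provider; not from the original source):
+//
+// creds := NewEnvCredentials()
+// creds.Expire() // force a refresh
+// v, err := creds.Get() // Provider.Retrieve() runs again here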
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
+
+// Provide a stub-able time.Now for unit tests so expiry can be tested.
+var currentTime = time.Now
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ec2_role_provider.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ec2_role_provider.go
new file mode 100644
index 0000000..639ae43
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ec2_role_provider.go
@@ -0,0 +1,173 @@
+package credentials
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+)
+
+const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track
+// of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &credentials.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: &http.Client{
+// Timeout: 10 * time.Second,
+// },
+// // Use the default EC2 Role metadata endpoint. Alternate endpoints can be
+// // specified by setting Endpoint to something else.
+// Endpoint: "",
+// // Do not use early expiry of credentials. If a non-zero value is
+// // specified the credentials will be expired early.
+// ExpiryWindow: 0,
+// }
+//
+type EC2RoleProvider struct {
+ // Endpoint must be a fully qualified URL
+ Endpoint string
+
+ // HTTP client to use when connecting to the EC2 service
+ Client *http.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // The date/time at which the credentials expire.
+ expiresOn time.Time
+}
+
+// NewEC2RoleCredentials returns a pointer to a new Credentials object
+// wrapping the EC2RoleProvider.
+//
+// Takes a custom http.Client which can be configured for custom handling of
+// things such as timeout.
+//
+// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
+// role and credentials.
+//
+// Window is the expiry window that will be subtracted from the expiry returned
+// by the role credential request. This is done so that the credentials will
+// expire sooner than their actual lifespan.
+func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials {
+ return NewCredentials(&EC2RoleProvider{
+ Endpoint: endpoint,
+ Client: client,
+ ExpiryWindow: window,
+ })
+}
+
+// Retrieve retrieves credentials from the EC2 service.
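+// It first lists the role names available at the metadata endpoint, then
+// requests the credentials of the first role in that list.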
+// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (Value, error) { + if m.Client == nil { + m.Client = http.DefaultClient + } + if m.Endpoint == "" { + m.Endpoint = metadataCredentialsEndpoint + } + + credsList, err := requestCredList(m.Client, m.Endpoint) + if err != nil { + return Value{}, err + } + + if len(credsList) == 0 { + return Value{}, apierr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, m.Endpoint, credsName) + if err != nil { + return Value{}, err + } + + m.expiresOn = roleCreds.Expiration + if m.ExpiryWindow > 0 { + // Offset based on expiry window if set. + m.expiresOn = m.expiresOn.Add(-m.ExpiryWindow) + } + + return Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + }, nil +} + +// IsExpired returns if the credentials are expired. +func (m *EC2RoleProvider) IsExpired() bool { + return m.expiresOn.Before(currentTime()) +} + +// A ec2RoleCredRespBody provides the shape for deserializing credential +// request responses. +type ec2RoleCredRespBody struct { + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *http.Client, endpoint string) ([]string, error) { + resp, err := client.Get(endpoint) + if err != nil { + return nil, apierr.New("ListEC2Role", "failed to list EC2 Roles", err) + } + defer resp.Body.Close() + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, apierr.New("ReadEC2Role", "failed to read list of EC2 Roles", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) { + resp, err := client.Get(endpoint + credsName) + if err != nil { + return nil, apierr.New("GetEC2RoleCredentials", + fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), + err) + } + defer resp.Body.Close() + + respCreds := &ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil { + return nil, apierr.New("DecodeEC2RoleCredentials", + fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), + err) + } + + return respCreds, nil +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 0000000..75e7f60 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,67 @@ +package credentials + +import ( + "os" + + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. 
+ ErrAccessKeyIDNotFound = apierr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = apierr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// - Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// - Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 0000000..aa2dc50 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,8 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ini.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ini.go new file mode 100644 index 0000000..199df17 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/ini.go @@ -0,0 +1,123 @@ +// Package ini provides functions for parsing INI configuration files. +package credentials + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +var ( + sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) +) + +// ErrSyntax is returned when there is a syntax error in an INI file. +type ErrSyntax struct { + Line int + Source string // The contents of the erroneous line, without leading or trailing whitespace +} + +func (e ErrSyntax) Error() string { + return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) +} + +// A File represents a parsed INI file. +type File map[string]Section + +// A Section represents a single section of an INI file. +type Section map[string]string + +// Returns a named Section. A Section will be created if one does not already exist for the given name. 
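+//
+// Editor's illustrative sketch of the File API (assumed INI content, not from
+// the original source):
+//
+// f, _ := Load(strings.NewReader("[default]\naws_access_key_id = AKID\n"))
+// id, ok := f.Get("default", "aws_access_key_id") // "AKID", true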
+func (f File) Section(name string) Section { + section := f[name] + if section == nil { + section = make(Section) + f[name] = section + } + return section +} + +// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. +func (f File) Get(section, key string) (value string, ok bool) { + if s := f[section]; s != nil { + value, ok = s[key] + } + return +} + +// Loads INI data from a reader and stores the data in the File. +func (f File) Load(in io.Reader) (err error) { + bufin, ok := in.(*bufio.Reader) + if !ok { + bufin = bufio.NewReader(in) + } + return parseFile(bufin, f) +} + +// Loads INI data from a named file and stores the data in the File. +func (f File) LoadFile(file string) (err error) { + in, err := os.Open(file) + if err != nil { + return + } + defer in.Close() + return f.Load(in) +} + +func parseFile(in *bufio.Reader, file File) (err error) { + section := "" + lineNum := 0 + for done := false; !done; { + var line string + if line, err = in.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + return + } + } + lineNum++ + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val := groups[1], groups[2] + key, val = strings.TrimSpace(key), strings.TrimSpace(val) + file.Section(section)[key] = val + } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { + name := strings.TrimSpace(groups[1]) + section = name + // Create the section if it does not exist + file.Section(section) + } else { + return ErrSyntax{lineNum, line} + } + + } + return nil +} + +// Loads and returns a File from a reader. +func Load(in io.Reader) (File, error) { + file := make(File) + err := file.Load(in) + return file, err +} + +// Loads and returns an INI File from a file on disk. +func LoadFile(filename string) (File, error) { + file := make(File) + err := file.LoadFile(filename) + return file, err +} \ No newline at end of file diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 0000000..f1f38e9 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,131 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = apierr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. If empty will default to current user's + // home directory. + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. 
+ retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := LoadFile(filename) + if err != nil { + return Value{}, apierr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile := config.Section(profile) + + id, ok := iniProfile["aws_access_key_id"] + if !ok { + return Value{}, apierr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret, ok := iniProfile["aws_secret_access_key"] + if !ok { + return Value{}, apierr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + token := iniProfile["aws_session_token"] + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000..253d79b --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,42 @@ +package credentials + +import ( + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = apierr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set pragmatically, +// and will never expire. 
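+//
+// Editor's illustrative sketch (not from the original source):
+//
+// creds := NewStaticCredentials("AKID", "SECRET", "")
+// v, err := creds.Get() // always returns the same Value; never expires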
+type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{}, ErrStaticCredentialsEmpty + } + + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handler_functions.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handler_functions.go new file mode 100644 index 0000000..970487b --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handler_functions.go @@ -0,0 +1,154 @@ +package aws + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/ks3sdklib/aws-sdk-go/aws/awserr" + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +var sleepDelay = func(delay time.Duration) { + time.Sleep(delay) +} + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLength builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +func BuildContentLength(r *Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.bodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.bodyStart, 0) // make sure to seek back to original location + length = end - r.bodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +} + +// UserAgentHandler is a request handler for injecting User agent into requests. +func UserAgentHandler(r *Request) { + r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion) +} +func ContentTypeHandler(r *Request) { + if len(r.HTTPRequest.Header["Content-Type"]) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/xml") + } +} + +var reStatusCode = regexp.MustCompile(`^(\d+)`) + +// SendHandler is a request handler to send service request using HTTP client. +func SendHandler(r *Request) { + + var err error + if r.HTTPRequest.ContentLength <= 0 { + r.HTTPRequest.Body = http.NoBody + } + r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. 
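+ // Editor's note (added for clarity): the block below parses the HTTP
+ // status code out of the url.Error text and synthesizes an empty
+ // *http.Response, so later handlers can treat the redirect failure
+ // like any other HTTP error response.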
+ if e, ok := err.(*url.Error); ok {
+ if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ // Catch all other request errors.
+ r.Error = apierr.New("RequestError", "send request failed", err)
+ r.Retryable.Set(true) // network errors are retryable
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate a service response.
+func ValidateResponseHandler(r *Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = apierr.New("UnknownError", "unknown error", nil)
+ }
+}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+func AfterRetryHandler(r *Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if !r.Retryable.IsSet() {
+ r.Retryable.Set(r.Service.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.Service.RetryRules(r)
+ sleepDelay(r.RetryDelay)
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ if isCodeExpiredCreds(err.Code()) {
+ r.Config.Credentials.Expire()
+ }
+ }
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion error = apierr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint error = apierr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+func ValidateEndpointHandler(r *Request) {
+ if r.Service.SigningRegion == "" && r.Service.Config.Region == "" {
+ r.Error = ErrMissingRegion
+ } else if r.Service.Endpoint == "" {
+ r.Error = ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handlers.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handlers.go
new file mode 100644
index 0000000..1968cb9
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/handlers.go
@@ -0,0 +1,85 @@
+package aws
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+}
+
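Because every stage of the request lifecycle is an ordered HandlerList, callers can splice custom logic into a service without touching SDK code. A hedged fragment (NewService and the Config fields come from this package; the logging itself is purely illustrative):

    svc := aws.NewService(&aws.Config{Region: "BEIJING"})
    // Log every attempt, including retries, just before it goes on the wire.
    svc.Handlers.Send.PushFront(func(r *aws.Request) {
        log.Printf("attempt %d: %s %s", r.RetryCount, r.HTTPRequest.Method, r.HTTPRequest.URL)
    })

+// copy returns a copy of this handler's lists.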
+func (h *Handlers) copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ var n HandlerList
+ n.list = append([]func(*Request){}, l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = []func(*Request){}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handlers f to the back of the handler list.
+func (l *HandlerList) PushBack(f ...func(*Request)) {
+ l.list = append(l.list, f...)
+}
+
+// PushFront pushes handlers f to the front of the handler list.
+func (l *HandlerList) PushFront(f ...func(*Request)) {
+ l.list = append(f, l.list...)
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for _, f := range l.list {
+ f(r)
+ }
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/param_validator.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/param_validator.go
new file mode 100644
index 0000000..2c3e404
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/param_validator.go
@@ -0,0 +1,89 @@
+package aws
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+)
+
+// ValidateParameters is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+func ValidateParameters(r *Request) {
+ if r.ParamsFilled() {
+ v := validator{errors: []string{}}
+ v.validateAny(reflect.ValueOf(r.Params), "")
+
+ if count := len(v.errors); count > 0 {
+ format := "%d validation errors:\n- %s"
+ msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
+ r.Error = apierr.New("InvalidParameter", msg, nil)
+ }
+ }
+}
+
+// A validator validates values, collecting the validation errors that occur.
+type validator struct {
+ errors []string
+}
+
+// validateAny will validate any struct, slice or map type. All validations
+// are also performed recursively for nested types.
+func (v *validator) validateAny(value reflect.Value, path string) {
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return
+ }
+
+ switch value.Kind() {
+ case reflect.Struct:
+ v.validateStruct(value, path)
+ case reflect.Slice:
+ for i := 0; i < value.Len(); i++ {
+ v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
+ }
+ case reflect.Map:
+ for _, n := range value.MapKeys() {
+ v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
+ }
+ }
+}
+
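validateAny and validateStruct walk the parameter tree and flag any exported field tagged required whose pointer, slice, or map value is nil, recording its path. An illustrative sketch with a hypothetical input shape:

    // Hypothetical input shape; only the required tags matter here.
    type PutObjectInput struct {
        Bucket *string `required:"true"`
        Key    *string `required:"true"`
    }

    // With Bucket set but Key left nil, ValidateParameters would set an
    // "InvalidParameter" error whose message contains:
    //   missing required parameter: Key

+// validateStruct will validate the struct value's fields. If the structure has
+// nested types those types will also be validated.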
+func (v *validator) validateStruct(value reflect.Value, path string) {
+ prefix := "."
+ if path == "" {
+ prefix = ""
+ }
+
+ for i := 0; i < value.Type().NumField(); i++ {
+ f := value.Type().Field(i)
+ if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
+ continue
+ }
+ fvalue := value.FieldByName(f.Name)
+
+ notset := false
+ if f.Tag.Get("required") != "" {
+ switch fvalue.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ if fvalue.IsNil() {
+ notset = true
+ }
+ default:
+ if !fvalue.IsValid() {
+ notset = true
+ }
+ }
+ }
+
+ if notset {
+ msg := "missing required parameter: " + path + prefix + f.Name
+ v.errors = append(v.errors, msg)
+ } else {
+ v.validateAny(fvalue, path+prefix+f.Name)
+ }
+ }
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/request.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/request.go
new file mode 100644
index 0000000..a637cf4
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/request.go
@@ -0,0 +1,311 @@
+package aws
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/ks3sdklib/aws-sdk-go/aws/awsutil"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ *Service
+ Handlers Handlers
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ bodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount uint
+ Retryable SettableBool
+ RetryDelay time.Duration
+
+ built bool
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request {
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+ p := operation.HTTPPath
+ if p == "" {
+ p = "/"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+ httpReq.URL, _ = url.Parse(service.Endpoint + p)
+
+ r := &Request{
+ Service: service,
+ Handlers: service.Handlers.copy(),
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: nil,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+ return r
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && r.Retryable.Get() && r.RetryCount < r.Service.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
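A Request is normally built by generated client code, but the pieces compose directly. A hedged sketch of wiring an Operation to NewRequest (the operation name and output shape are hypothetical; svc is a *Service obtained elsewhere):

    op := &aws.Operation{
        Name:       "ListBuckets", // hypothetical operation
        HTTPMethod: "GET",
        HTTPPath:   "/",
    }
    var out ListBucketsOutput // hypothetical output shape
    req := aws.NewRequest(svc, op, nil, &out)
    if err := req.Send(); err != nil {
        // err is the wrapped apierr error produced by the handler chain
    }

+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.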
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.HTTPRequest.Body = ioutil.NopCloser(reader)
+ r.Body = reader
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Error = nil
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+func (r *Request) Send() error {
+ for {
+ r.Sign()
+
+ if r.Error != nil {
+ return r.Error
+ }
+ if r.Retryable.Get() {
+ // Re-seek the body back to the original point for a retry so that
+ // send will send the body's contents again in the upcoming request.
+ r.Body.Seek(r.bodyStart, 0)
+ }
+ r.Retryable.Reset()
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ return r.Error
+ }
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return r.nextPageTokens() != nil
+}
+
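Send drives the whole handler pipeline in a loop until the request succeeds, fails permanently, or exhausts its retries; pagination builds on it through NextPage and EachPage below. A hedged usage fragment, continuing the hypothetical request from the earlier sketch:

    err := req.EachPage(func(data interface{}, lastPage bool) bool {
        out := data.(*ListObjectsOutput) // hypothetical output shape
        fmt.Println("page with", len(out.Contents), "keys; last page:", lastPage)
        return true // return false to stop paging early
    })

+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.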
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+ switch v := tr[0].(type) {
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ found := false
+ tokens := make([]interface{}, len(r.Operation.OutputTokens))
+
+ for i, outtok := range r.Operation.OutputTokens {
+ v := awsutil.ValuesAtAnyPath(r.Data, outtok)
+ if len(v) > 0 {
+ found = true
+ tokens[i] = v[0]
+ }
+ }
+
+ if found {
+ return tokens
+ }
+ return nil
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if tokens == nil {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ for page := r; page != nil; page = page.NextPage() {
+ page.Send()
+ shouldContinue := fn(page.Data, !page.HasNextPage())
+ if page.Error != nil || !shouldContinue {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/service.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/service.go
new file mode 100644
index 0000000..8c0fc5e
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/service.go
@@ -0,0 +1,176 @@
+package aws
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "net/http/httputil"
+ "regexp"
+ "time"
+
+ "github.com/ks3sdklib/aws-sdk-go/aws/awserr"
+ "github.com/ks3sdklib/aws-sdk-go/internal/endpoints"
+)
+
+// A Service implements the base service request and response handling
+// used by all services.
+type Service struct {
+ Config *Config
+ Handlers Handlers
+ ManualSend bool
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+ RetryRules func(*Request) time.Duration
+ ShouldRetry func(*Request) bool
+ DefaultMaxRetries uint
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// NewService will return a pointer to a new, initialized Service object.
+func NewService(config *Config) *Service {
+ svc := &Service{Config: config}
+ svc.Initialize()
+ return svc
+}
+
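Initialize, defined next, installs the default handler chain and resolves the endpoint from the Config. A hedged construction sketch (the field names follow the Config usages in this file; the endpoint value is illustrative):

    svc := aws.NewService(&aws.Config{
        Endpoint:   "ks3-cn-beijing.ksyuncs.com", // illustrative endpoint
        Region:     "BEIJING",
        LogLevel:   1, // non-zero enables the debug handlers below
        MaxRetries: 3,
    })

+// Initialize initializes the service.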
+func (s *Service) Initialize() {
+ if s.Config == nil {
+ s.Config = &Config{}
+ }
+ if s.Config.HTTPClient == nil {
+ s.Config.HTTPClient = http.DefaultClient
+ }
+
+ if s.RetryRules == nil {
+ s.RetryRules = retryRules
+ }
+
+ if s.ShouldRetry == nil {
+ s.ShouldRetry = shouldRetry
+ }
+
+ s.DefaultMaxRetries = 3
+ s.Handlers.Validate.PushBack(ValidateEndpointHandler)
+ s.Handlers.Build.PushBack(UserAgentHandler)
+ s.Handlers.Sign.PushBack(BuildContentLength)
+ s.Handlers.Send.PushBack(SendHandler)
+ s.Handlers.AfterRetry.PushBack(AfterRetryHandler)
+ s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler)
+ s.AddDebugHandlers()
+ s.buildEndpoint()
+
+ if !s.Config.DisableParamValidation {
+ s.Handlers.Validate.PushBack(ValidateParameters)
+ }
+}
+
+// buildEndpoint builds the endpoint values the service will use to make requests with.
+func (s *Service) buildEndpoint() {
+ if s.Config.Endpoint != "" {
+ s.Endpoint = s.Config.Endpoint
+ } else {
+ s.Endpoint, s.SigningRegion =
+ endpoints.EndpointForRegion(s.ServiceName, s.Config.Region)
+ }
+ if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
+ scheme := "https"
+ if s.Config.DisableSSL {
+ scheme = "http"
+ }
+ s.Endpoint = scheme + "://" + s.Endpoint
+ }
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (s *Service) AddDebugHandlers() {
+ out := s.Config.Logger
+ if s.Config.LogLevel == 0 {
+ return
+ }
+
+ s.Handlers.Send.PushFront(func(r *Request) {
+ logBody := r.Config.LogHTTPBody
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+ fmt.Fprintf(out, "---[ REQUEST POST-SIGN ]-----------------------------\n")
+ fmt.Fprintf(out, "%s\n", string(dumpedBody))
+ fmt.Fprintf(out, "-----------------------------------------------------\n")
+ })
+ s.Handlers.Send.PushBack(func(r *Request) {
+ fmt.Fprintf(out, "---[ RESPONSE ]--------------------------------------\n")
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogHTTPBody
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+ fmt.Fprintf(out, "%s\n", string(dumpedBody))
+ } else if r.Error != nil {
+ fmt.Fprintf(out, "%s\n", r.Error)
+ }
+ fmt.Fprintf(out, "-----------------------------------------------------\n")
+ })
+}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request.
+func (s *Service) MaxRetries() uint {
+ if s.Config.MaxRetries < 0 {
+ return s.DefaultMaxRetries
+ }
+ return uint(s.Config.MaxRetries)
+}
+
+// retryRules returns the delay duration before retrying this request again
+func retryRules(r *Request) time.Duration {
+ delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 30
+ return delay * time.Millisecond
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+}
+
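retryRules implements plain exponential backoff: the delay is 2^RetryCount times 30ms, so successive attempts wait 30ms, 60ms, 120ms, and so on. A small self-contained check of that arithmetic:

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    func main() {
        for retry := 0; retry < 4; retry++ {
            delay := time.Duration(math.Pow(2, float64(retry))) * 30 * time.Millisecond
            fmt.Println(retry, delay) // 0 30ms, 1 60ms, 2 120ms, 3 240ms
        }
    }

+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.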
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+// shouldRetry returns if the request should be retried.
+func shouldRetry(r *Request) bool {
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeRetryable(err.Code())
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/types.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..7801cb6
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/types.go
@@ -0,0 +1,131 @@
+package aws
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
+// String converts a Go string into a string pointer.
+func String(v string) *string {
+ return &v
+}
+
+// Boolean converts a Go bool into a boolean pointer.
+func Boolean(v bool) *bool {
+ return &v
+}
+
+// Long converts a Go int64 into a long pointer.
+func Long(v int64) *int64 {
+ return &v
+}
+
+// Double converts a Go float64 into a double pointer.
+func Double(v float64) *float64 {
+ return &v
+}
+
+// Time converts a Go Time into a Time pointer
+func Time(t time.Time) *time.Time {
+ return &t
+}
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and any error that occurred, will be returned.
+//
+// If the reader is not an io.Reader, zero bytes read and a nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A SettableBool provides a boolean value which includes the state of
+// whether the value was set or unset. The set state is in addition to
+// the value's value (true|false).
+type SettableBool struct {
+ value bool
+ set bool
+}
+
+// SetBool returns a SettableBool with a value set
+func SetBool(value bool) SettableBool {
+ return SettableBool{value: value, set: true}
+}
+
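SettableBool, used by the retry logic above, is a three-state flag: unset, explicitly false, and explicitly true. That is what lets AfterRetryHandler distinguish "no handler decided yet" from "a handler decided not to retry". A short fragment:

    var b aws.SettableBool
    fmt.Println(b.Get(), b.IsSet()) // false false: unset reads as false
    b.Set(false)
    fmt.Println(b.Get(), b.IsSet()) // false true: explicitly set to false
    b.Reset()
    fmt.Println(b.IsSet()) // false: back to the unset state

+// Get returns the value. Will always be false if the SettableBool was not set.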
+func (b *SettableBool) Get() bool { + if !b.set { + return false + } + return b.value +} + +// Set sets the value and updates the state that the value has been set. +func (b *SettableBool) Set(value bool) { + b.value = value + b.set = true +} + +// IsSet returns if the value has been set +func (b *SettableBool) IsSet() bool { + return b.set +} + +// Reset resets the state and value of the SettableBool to its initial default +// state of not set and zero value. +func (b *SettableBool) Reset() { + b.value = false + b.set = false +} + +// String returns the string representation of the value if set. Zero if not set. +func (b *SettableBool) String() string { + return fmt.Sprintf("%t", b.Get()) +} + +// GoString returns the string representation of the SettableBool value and state +func (b *SettableBool) GoString() string { + return fmt.Sprintf("Bool{value:%t, set:%t}", b.value, b.set) +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/aws/version.go b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/version.go new file mode 100644 index 0000000..02da76d --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "0.6.5" diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/apierr/error.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/apierr/error.go new file mode 100644 index 0000000..c45555a --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/apierr/error.go @@ -0,0 +1,139 @@ +// Package apierr represents API error types. +package apierr + +import "fmt" + +// A BaseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type BaseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + origErr error +} + +// New returns an error object for the code, message, and err. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func New(code, message string, origErr error) *BaseError { + return &BaseError{ + code: code, + message: message, + origErr: origErr, + } +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b *BaseError) Error() string { + return b.ErrorWithExtra("") +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b *BaseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b *BaseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b *BaseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. 
+func (b *BaseError) OrigErr() error {
+ return b.origErr
+}
+
+// ErrorWithExtra is a helper method to add an extra string to the stratified
+// error message. The extra message will be added on the next line below the
+// error message like the following:
+//
+// <code>: <message>
+// <extra>
+//
+// If there is an original error the error will be included on a new line.
+//
+// <code>: <message>
+// <extra>
+// caused by: <original error>
+func (b *BaseError) ErrorWithExtra(extra string) string {
+ msg := fmt.Sprintf("%s: %s", b.code, b.message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if b.origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, b.origErr.Error())
+ }
+ return msg
+}
+
+// A RequestError wraps a request or service error.
+//
+// Composed of BaseError for code, message, and original error.
+type RequestError struct {
+ *BaseError
+ statusCode int
+ requestID string
+}
+
+// NewRequestError returns a wrapped error with additional information for the
+// request status code and service request ID.
+//
+// Should be used to wrap all errors which involve service requests. Even if
+// the request failed without a service response, the HTTP status code
+// may be meaningful.
+//
+// Also wraps original errors via the BaseError.
+func NewRequestError(base *BaseError, statusCode int, requestID string) *RequestError {
+ return &RequestError{
+ BaseError: base,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r *RequestError) Error() string {
+ return r.ErrorWithExtra(fmt.Sprintf("status code: %d, request id: [%s]",
+ r.statusCode, r.requestID))
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r *RequestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r *RequestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r *RequestError) RequestID() string {
+ return r.requestID
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.go
new file mode 100644
index 0000000..d040ccc
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.go
@@ -0,0 +1,31 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import "strings"
+
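EndpointForRegion, defined next, tries the most specific key first and falls back to wildcards. A hedged fragment using values from the generated map further below (note the package is internal, so this is callable only from within this module):

    // Lookup order for svcName="s3", region="us-west-2":
    //   "us-west-2/s3" -> matches "s3-{region}.amazonaws.com"
    //   "us-west-2/*", "*/s3", "*/*" are never reached
    endpoint, signingRegion := endpoints.EndpointForRegion("s3", "us-west-2")
    // endpoint == "s3-us-west-2.amazonaws.com", signingRegion == ""

+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.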
+func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + return + } + } + return +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.json b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.json new file mode 100644 index 0000000..4c58809 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints.json @@ -0,0 +1,77 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints_map.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints_map.go new file mode 100644 index 0000000..894c1a6 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/endpoints/endpoints_map.go @@ -0,0 +1,89 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/build.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/build.go new file mode 100644 index 0000000..0d3c0af --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/build.go @@ -0,0 +1,33 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go + +import ( + "net/url" + + "github.com/ks3sdklib/aws-sdk-go/aws" + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" + "github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil" +) + +// Build builds a request for an AWS Query service. 
+func Build(r *aws.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.Service.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = apierr.New("Marshal", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000..3b417a8 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go @@ -0,0 +1,223 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + value := elemOf(value.Field(i)) + field := t.Field(i) + var name string + + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, value, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ v.Set(name, value.UTC().Format(ISO8601UTC))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal.go
new file mode 100644
index 0000000..db68867
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal.go
@@ -0,0 +1,29 @@
+package query
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/ks3sdklib/aws-sdk-go/aws"
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+ "github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil"
+)
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = apierr.New("Unmarshal", "failed decoding Query response", err)
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *aws.Request) {
+ // TODO implement unmarshaling of request IDs
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..ee9a08e
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/unmarshal_error.go
@@ -0,0 +1,45 @@
+package query
+
+import (
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "log"
+
+ "github.com/ks3sdklib/aws-sdk-go/aws"
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+)
+
+// XmlErrorResponse is the XML error document returned by an AWS Query service.
+type XmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ StatusCode int `xml:"-"` // filled in from the HTTP response, not the XML body
+ Message string `xml:"Message"`
+ Resource string `xml:"Resource"`
+ RequestID string `xml:"RequestId"`
+}
+
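For reference, the error document this struct decodes is the familiar S3-style XML body; the element names follow directly from the struct tags above. A hedged fragment (the field values are made up):

    body := []byte(`<Error>
      <Code>NoSuchKey</Code>
      <Message>The specified key does not exist.</Message>
      <Resource>/mybucket/mykey</Resource>
      <RequestId>4442587FB7D0A2F9</RequestId>
    </Error>`)
    resp := &XmlErrorResponse{}
    _ = xml.Unmarshal(body, resp) // resp.Code == "NoSuchKey"

+// UnmarshalError unmarshals an error response for an AWS Query service.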
+func UnmarshalError(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &XmlErrorResponse{} + body, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + log.Printf("read body err, %v\n", err) + return + } + err = xml.Unmarshal(body, &resp) + resp.StatusCode = r.HTTPResponse.StatusCode + + if err != nil && err != io.EOF { + r.Error = apierr.New("Unmarshal", "failed to decode query XML error response", err) + } else { + r.Error = apierr.NewRequestError( + apierr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/build.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/build.go new file mode 100644 index 0000000..b5afe29 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/build.go @@ -0,0 +1,225 @@ +// Package rest provides RESTful serialisation of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + "github.com/ks3sdklib/aws-sdk-go/aws" + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// Build builds the REST component of a service request. +func Build(r *aws.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *aws.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + switch field.Tag.Get("location") { + case "headers": // header maps + buildHeaderMap(r, m, field.Tag.Get("locationName")) + case "header": + buildHeader(r, m, name) + case "uri": + buildURI(r, m, name) + case "querystring": + buildQueryString(r, m, name, query) + } + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *aws.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = apierr.New("Marshal", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func 
buildHeader(r *aws.Request, v reflect.Value, name string) {
+ str, err := convertType(v)
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST request", err)
+ } else if str != nil {
+ r.HTTPRequest.Header.Add(name, *str)
+ }
+}
+
+func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key))
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST request", err)
+ } else if str != nil {
+ r.HTTPRequest.Header.Add(prefix+key.String(), *str)
+ }
+ }
+}
+
+func buildURI(r *aws.Request, v reflect.Value, name string) {
+ value, err := convertType(v)
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST request", err)
+ } else if value != nil {
+ uri := r.HTTPRequest.URL.Path
+ uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
+ uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
+ r.HTTPRequest.URL.Path = uri
+ }
+}
+
+func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
+ str, err := convertType(v)
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST request", err)
+ } else if str != nil {
+ query.Set(name, *str)
+ }
+}
+
+func updatePath(url *url.URL, urlPath string) {
+ scheme, query := url.Scheme, url.RawQuery
+
+ // path.Clean strips the trailing slash, which would make it impossible to create directory keys, so remember it and re-append it below.
+ add := false
+ if len(urlPath) > 1 && urlPath[len(urlPath)-1] == '/' {
+ add = true
+ }
+
+ // path.Clean will remove duplicate leading /
+ // this will make deleting / started key impossible
+ // so escape it here first
+ urlPath = strings.Replace(urlPath, "//", "/%2F", -1)
+
+ // clean up path
+ urlPath = path.Clean(urlPath)
+ if add {
+ urlPath += "/"
+ }
+
+ // get formatted URL minus scheme so we can build this into Opaque
+ url.Scheme, url.Path, url.RawQuery = "", "", ""
+ s := url.String()
+ url.Scheme = scheme
+ url.RawQuery = query
+
+ // build opaque URI
+ url.Opaque = s + urlPath
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ buf.WriteByte('%')
+ buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value) (*string, error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return nil, nil
+ }
+
+ var str string
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ str = value.UTC().Format(RFC822)
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return nil, err
+ }
+ return &str, nil
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/payload.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/payload.go
new file mode 100644
index 0000000..1f603bb
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
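EscapePath differs from net/url escaping only in which bytes it leaves unescaped, and updatePath above deliberately re-appends a trailing slash so that directory-style keys survive path.Clean. A quick illustration of the two escape modes:

    fmt.Println(rest.EscapePath("photos/2015 summer", false)) // photos/2015%20summer
    fmt.Println(rest.EscapePath("photos/2015 summer", true))  // photos%2F2015%20summer

+// PayloadMember returns the payload field member of i if there is one, or nil.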
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/unmarshal.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/unmarshal.go new file mode 100644 index 0000000..715c196 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest/unmarshal.go @@ -0,0 +1,174 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/ks3sdklib/aws-sdk-go/aws" + "github.com/ks3sdklib/aws-sdk-go/internal/apierr" +) + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *aws.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *aws.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = apierr.New("Unmarshal", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = apierr.New("Unmarshal", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = apierr.New("Unmarshal", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *aws.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := 
unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
+ if err != nil {
+ r.Error = apierr.New("Unmarshal", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = apierr.New("Unmarshal", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string) error {
+ if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case *[]byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ t, err := time.Parse(RFC822, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml/restxml.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml/restxml.go
new file mode 100644
index 0000000..e281e3b
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml/restxml.go
@@ -0,0 +1,54 @@
+// Package restxml provides RESTful XML serialisation of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/rest-xml.json build_test.go
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+ "github.com/ks3sdklib/aws-sdk-go/aws"
+ "github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+ "github.com/ks3sdklib/aws-sdk-go/internal/protocol/query"
+ "github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest"
+ "github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil"
+)
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *aws.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST XML request", err)
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *aws.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		defer r.HTTPResponse.Body.Close()
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = apierr.New("Unmarshal", "failed to decode REST XML response", err)
+			return
+		}
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *aws.Request) {
+	rest.Unmarshal(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *aws.Request) {
+	query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..d3db250
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
@@ -0,0 +1,287 @@
+// Package xmlutil provides XML serialisation of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+	root := NewXMLElement(xml.Name{})
+	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+		return err
+	}
+	for _, c := range root.Children {
+		for _, v := range c {
+			return StructToXML(e, v, false)
+		}
+	}
+	return nil
+}
+
+// elemOf returns the reflection element of a value, unwrapping any pointers.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
+
+// An xmlBuilder serializes values from Go code to XML.
+type xmlBuilder struct {
+	encoder    *xml.Encoder
+	namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// It also takes a "type" tag value that sets what type the value should be converted
+// to an XMLNode as. If the type is not provided, reflection is used to determine
+// the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
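+//
+// As an illustrative sketch (not a shape from this SDK), a struct such as
+//
+//	type Grantee struct {
+//		ID *string `locationName:"ID" type:"string"`
+//	}
+//
+// tagged with locationName:"Grantee" serializes to <Grantee><ID>...</ID></Grantee>.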
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	fieldAdded := false
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// if there is an xmlNamespace associated with this struct, set it on the element
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+		mTag := field.Tag
+
+		if mTag.Get("location") != "" { // skip non-body members
+			continue
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+
+		fieldAdded = true
+	}
+
+	if fieldAdded { // only append this child if we have one or more valid members
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check for unflattened list member
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes.
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	var str string
+	switch converted := value.Interface().(type) {
+	case string:
+		str = converted
+	case []byte:
+		if !value.IsNil() {
+			str = base64.StdEncoding.EncodeToString(converted)
+		}
+	case bool:
+		str = strconv.FormatBool(converted)
+	case int64:
+		str = strconv.FormatInt(converted, 10)
+	case int:
+		str = strconv.Itoa(converted)
+	case float64:
+		str = strconv.FormatFloat(converted, 'f', -1, 64)
+	case float32:
+		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+	case time.Time:
+		const ISO8601UTC = "2006-01-02T15:04:05Z"
+		str = converted.UTC().Format(ISO8601UTC)
+	default:
+		return fmt.Errorf("unsupported value for param %s: %v (%s)",
+			tag.Get("locationName"), value.Interface(), value.Type().Name())
+	}
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+		attr := xml.Attr{Name: xname, Value: str}
+		current.Attr = append(current.Attr, attr)
+	} else { // regular text node
+		current.AddChild(&XMLNode{Name: xname, Text: str})
+	}
+	return nil
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..d5535d3
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,263 @@
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The shape of v
+// needs to match the shape of the XML expected to be decoded.
+// If the shapes don't match, unmarshaling will fail.
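+//
+// A minimal sketch of direct use (imports elided); the SDK normally reaches
+// this through the restxml handlers, and the shape and XML below are
+// illustrative only:
+//
+//	out := &struct {
+//		Name *string `locationName:"Name" type:"string"`
+//	}{}
+//	d := xml.NewDecoder(strings.NewReader("<Result><Name>demo</Name></Result>"))
+//	if err := UnmarshalXML(out, d, ""); err != nil {
+//		// handle error
+//	}
+//	// *out.Name == "demo"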
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("SDKShapeTraits"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
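+//
+// Unflattened lists nest their items under "member" (or locationNameList)
+// entries, while a flattened list repeats its element directly. Illustrative
+// tags only, not a shape from this SDK:
+//
+//	Parts []*int64 `locationName:"Part" type:"list" flattened:"true"`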
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(children), len(children)))
+			}
+
+			for i, c := range children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			if err := parseMapEntry(r, entry, tag); err != nil {
+				return err
+			}
+		}
+	} else { // this element is itself an entry
+		if err := parseMapEntry(r, node, tag); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from a XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			// propagate value parse errors instead of dropping them
+			if err := parse(valueR, value, ""); err != nil {
+				return err
+			}
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
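+//
+// Supported destinations are *string, []byte (base64), *bool, *int64,
+// *float64, and *time.Time (ISO 8601). For example, <Size>42</Size>
+// populates a field declared as (illustrative):
+//
+//	Size *int64 `locationName:"Size" type:"integer"`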
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + t, err = time.Parse("2006-01-02T15:04:05-07:00", node.Text) + if err != nil { + return err + } + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 0000000..72c198a --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
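+//
+// A hedged round-trip sketch (encoder and node are illustrative):
+//
+//	var buf bytes.Buffer
+//	n := NewXMLElement(xml.Name{Local: "Name"})
+//	n.Text = "demo"
+//	if err := StructToXML(xml.NewEncoder(&buf), n, true); err == nil {
+//		// buf.String() == "<Name>demo</Name>"
+//	}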
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+	if node.Text != "" {
+		e.EncodeToken(xml.CharData([]byte(node.Text)))
+	} else if sorted {
+		sortedNames := []string{}
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				StructToXML(e, v, sorted)
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				StructToXML(e, v, sorted)
+			}
+		}
+	}
+
+	e.EncodeToken(xml.EndElement{Name: node.Name})
+	return e.Flush()
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/internal/signer/v2/v2.go b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/signer/v2/v2.go
new file mode 100644
index 0000000..28605e7
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/internal/signer/v2/v2.go
@@ -0,0 +1,348 @@
+package v2
+
+import (
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/ks3sdklib/aws-sdk-go/aws"
+	"github.com/ks3sdklib/aws-sdk-go/aws/credentials"
+	"github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest"
+)
+
+const (
+	authHeaderPrefix = "AWS"
+	timeFormat       = "Mon, 02 Jan 2006 15:04:05 GMT"
+)
+
+// signQuerys lists the sub-resources and response-override parameters that
+// participate in the canonical resource.
+var signQuerys = map[string]bool{
+	"acl":                          true,
+	"lifecycle":                    true,
+	"location":                     true,
+	"logging":                      true,
+	"notification":                 true,
+	"policy":                       true,
+	"requestPayment":               true,
+	"torrent":                      true,
+	"uploadId":                     true,
+	"uploads":                      true,
+	"versionId":                    true,
+	"versioning":                   true,
+	"versions":                     true,
+	"website":                      true,
+	"delete":                       true,
+	"thumbnail":                    true,
+	"cors":                         true,
+	"pfop":                         true,
+	"querypfop":                    true,
+	"partNumber":                   true,
+	"response-content-type":        true,
+	"response-content-language":    true,
+	"response-expires":             true,
+	"response-cache-control":       true,
+	"response-content-disposition": true,
+	"response-content-encoding":    true,
+	"tagging":                      true,
+	"fetch":                        true,
+	"copy":                         true,
+	"mirror":                       true,
+}
+
+type signer struct {
+	Service     *aws.Service
+	Request     *http.Request
+	Time        time.Time
+	ExpireTime  time.Duration
+	ServiceName string
+	Region      string
+	CredValues  credentials.Value
+	Credentials *credentials.Credentials
+	Query       url.Values
+	Body        io.ReadSeeker
+	Debug       uint
+	Logger      io.Writer
+
+	isPresign     bool
+	formattedTime string
+
+	canonicalHeaders  string
+	canonicalResource string
+	stringToSign      string
+	signature         string
+	authorization     string
+}
+
+// Sign signs the request with the AWS signature version 2 scheme.
+func Sign(req *aws.Request) {
+	if req.Service.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.Service.SigningRegion
+	if region == "" {
+		region = req.Service.Config.Region
+	}
+
+	name := req.Service.SigningName
+	if name == "" {
+		name = req.Service.ServiceName
+	}
+
+	s := signer{
+		Service:     req.Service,
+		Request:     req.HTTPRequest,
+		Time:        req.Time,
+		ExpireTime:  req.ExpireTime,
+		Query:       req.HTTPRequest.URL.Query(),
+		Body:        req.Body,
+		ServiceName: name,
+		Region:      region,
+		Credentials: req.Service.Config.Credentials,
+		Debug:       req.Service.Config.LogLevel,
+		Logger:      req.Service.Config.Logger,
+	}
+
+	req.Error = s.sign()
+}
+
+func (v2 *signer) sign() error {
+	if v2.ExpireTime != 0 {
+		v2.isPresign = true
+	}
+
+	if v2.isRequestSigned() {
+		if !v2.Credentials.IsExpired() {
+			// If the request is already signed, and the credentials have not
+			// expired yet ignore the signing request.
+			return nil
+		}
+
+		// The credentials have expired for this request. The current signature
+		// is invalid, and the request needs to be re-signed because it would
+		// otherwise fail.
+		if v2.isPresign {
+			v2.removePresign()
+			// Update the request's query string to ensure the values stay in
+			// sync in the case retrieving the new credentials fails.
+			v2.Request.URL.RawQuery = v2.Query.Encode()
+		}
+	}
+
+	var err error
+	v2.CredValues, err = v2.Credentials.Get()
+	if err != nil {
+		return err
+	}
+
+	v2.build()
+
+	if v2.Debug > 0 {
+		v2.logSigningInfo()
+	}
+
+	return nil
+}
+
+func (v2 *signer) logSigningInfo() {
+	out := v2.Logger
+	fmt.Fprintf(out, "---[ STRING TO SIGN ]--------------------------------\n")
+	fmt.Fprintln(out, v2.stringToSign)
+	if v2.isPresign {
+		fmt.Fprintf(out, "---[ SIGNED URL ]--------------------------------\n")
+		fmt.Fprintln(out, v2.Request.URL)
+	}
+	fmt.Fprintf(out, "-----------------------------------------------------\n")
+}
+
+func (v2 *signer) build() {
+	v2.buildTime()              // no depends
+	v2.buildCanonicalHeaders()  // depends on cred string
+	v2.buildCanonicalResource() // depends on canon headers / signed headers
+	v2.buildStringToSign()      // depends on canon string
+	v2.buildSignature()         // depends on string to sign
+
+	if v2.isPresign {
+		querys := make(url.Values)
+		querys.Add("Signature", v2.signature)
+		querys.Add("AWSAccessKeyId", v2.CredValues.AccessKeyID)
+		v2.Request.URL.RawQuery += "&" + querys.Encode()
+	} else {
+		v2.Request.Header.Set("Authorization", "AWS "+v2.CredValues.AccessKeyID+":"+v2.signature)
+	}
+}
+
+func (v2 *signer) buildTime() {
+	v2.formattedTime = v2.Time.UTC().Format(timeFormat)
+
+	if v2.isPresign {
+		duration := int64(v2.ExpireTime / time.Second)
+		v2.Query.Set("Expires", strconv.FormatInt(duration, 10))
+	} else {
+		v2.Request.Header.Set("Date", v2.formattedTime)
+	}
+}
+
+func (v2 *signer) buildCanonicalHeaders() {
+	var headers []string
+	for k, v := range v2.Request.Header {
+		if strings.HasPrefix(k, "X-Amz-") {
+			key := strings.ToLower(http.CanonicalHeaderKey(k))
+			v2.Request.Header[key] = v
+			v2.Request.Header.Del(http.CanonicalHeaderKey(k))
+			headers = append(headers, key)
+		}
+	}
+	sort.Strings(headers)
+
+	headerValues := make([]string, len(headers))
+	for i, k := range headers {
+		key := strings.ToLower(http.CanonicalHeaderKey(k))
+		headerValues[i] = key + ":" + strings.Join(v2.Request.Header[key], ",")
+	}
+
+	v2.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (v2 *signer) buildCanonicalResource() {
+	endpoint := v2.Service.Endpoint
+
+	v2.Request.URL.RawQuery = strings.Replace(v2.Query.Encode(), "+", "%20", -1)
+	url := v2.Request.URL.String()
+	// in aws/service.go, buildEndpoint prepends the scheme as well
+	pathStyle := strings.HasPrefix(url, endpoint)
+	uri := v2.Request.URL.Opaque
+
+	bucketInHost := ""
+	if !pathStyle {
+		if strings.HasPrefix(url, "http://") {
+			url = url[7:]
+			endpoint = endpoint[7:]
+		} else if strings.HasPrefix(url, "https://") {
+			url = url[8:]
+			endpoint = endpoint[8:]
+		}
+		bucketInHost = url[0 : strings.Index(url, endpoint)-1]
+	}
+	if uri != "" {
+		uris := strings.Split(uri, "/")[3:]
+		appendSlash := false
+		if len(uris) == 1 && uris[0] != "" && bucketInHost == "" {
+			// only the bucket name is present
+			appendSlash = true
+		} else if len(uris) == 0 && bucketInHost != "" {
+			appendSlash = true
+		}
+		uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
+		if bucketInHost != "" {
+			uri = "/" + bucketInHost + uri
+		}
+		if appendSlash {
+			uri += "/"
+		}
+	} else {
+		uri = v2.Request.URL.Path
+	}
+	if uri == "" {
+		uri = "/"
+	}
+
+	if v2.ServiceName != "s3" {
+		uri = rest.EscapePath(uri, false)
+	}
+
+	var querys []string
+	for k := range v2.Query {
+		if _, ok := signQuerys[k]; ok {
+			querys = append(querys, k)
+		}
+	}
+	sort.Strings(querys)
+
+	queryValues := make([]string, len(querys))
+	for i, k := range querys {
+		v := v2.Query[k]
+		vString := strings.Join(v, ",")
+		if vString != "" {
+			queryValues[i] = k + "=" + vString
+		} else {
+			queryValues[i] = k
+		}
+	}
+	queryString := strings.Join(queryValues, "&")
+	if queryString == "" {
+		v2.canonicalResource = uri
+	} else {
+		v2.canonicalResource = uri + "?" + queryString
+	}
+}
+
+func (v2 *signer) buildStringToSign() {
+	md5list := v2.Request.Header["Content-Md5"]
+	md5 := ""
+	if len(md5list) > 0 {
+		md5 = md5list[0]
+	}
+
+	typelist := v2.Request.Header["Content-Type"]
+	contenttype := ""
+	if len(typelist) > 0 {
+		contenttype = typelist[0]
+	}
+
+	signItems := []string{v2.Request.Method, md5, contenttype}
+	if v2.isPresign {
+		signItems = append(signItems, v2.Query["Expires"][0])
+	} else {
+		signItems = append(signItems, v2.formattedTime)
+	}
+	if v2.canonicalHeaders != "" {
+		signItems = append(signItems, v2.canonicalHeaders)
+	}
+	signItems = append(signItems, v2.canonicalResource)
+
+	v2.stringToSign = strings.Join(signItems, "\n")
+}
+
+func (v2 *signer) buildSignature() {
+	secret := v2.CredValues.SecretAccessKey
+	signature := string(base64Encode(makeHmac([]byte(secret), []byte(v2.stringToSign))))
+	v2.signature = signature
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (v2 *signer) isRequestSigned() bool {
+	if v2.isPresign && v2.Query.Get("Signature") != "" {
+		return true
+	}
+	if v2.Request.Header.Get("Authorization") != "" {
+		return true
+	}
+
+	return false
+}
+
+// removePresign removes signing flags for both signed and presigned requests.
+func (v2 *signer) removePresign() {
+	v2.Query.Del("AWSAccessKeyId")
+	v2.Query.Del("Signature")
+	v2.Query.Del("Expires")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+	hash := hmac.New(sha1.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func base64Encode(src []byte) []byte {
+	return []byte(base64.StdEncoding.EncodeToString(src))
+}
diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/api.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/api.go
new file mode 100644
index 0000000..91b44aa
--- /dev/null
+++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,6486 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package s3 provides a client for Amazon Simple Storage Service.
+package s3
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	mapset "github.com/deckarep/golang-set"
+	"github.com/ks3sdklib/aws-sdk-go/aws"
+	"github.com/ks3sdklib/aws-sdk-go/aws/awserr"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+var oprw sync.Mutex
+
+// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation.
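+//
+// A hedged usage sketch (svc is an *s3.S3 client; the bucket, key, and the
+// UploadID field name are placeholders assumed from the standard S3 shapes,
+// not verified against this fork's generated types):
+//
+//	req, out := svc.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
+//		Bucket:   aws.String("my-bucket"),
+//		Key:      aws.String("my-key"),
+//		UploadID: aws.String("upload-id"),
+//	})
+//	err := req.Send() // out is populated on success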
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *aws.Request, output *AbortMultipartUploadOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opAbortMultipartUpload == nil { + opAbortMultipartUpload = &aws.Operation{ + Name: "AbortMultipartUpload", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(opAbortMultipartUpload, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} +func (c *S3) AbortMultipartUploadPresignedUrl(input *AbortMultipartUploadInput, expires time.Duration) (*url.URL, error) { + req, _ := c.AbortMultipartUploadRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opAbortMultipartUpload *aws.Operation + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *aws.Request, output *CompleteMultipartUploadOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opCompleteMultipartUpload == nil { + opCompleteMultipartUpload = &aws.Operation{ + Name: "CompleteMultipartUpload", + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(opCompleteMultipartUpload, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} +func (c *S3) CompleteMultipartUploadPresignedUrl(input *CompleteMultipartUploadInput, expires time.Duration) (*url.URL, error) { + req, _ := c.CompleteMultipartUploadRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opCompleteMultipartUpload *aws.Operation + +// CopyObjectRequest generates a request for the CopyObject operation. +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *aws.Request, output *CopyObjectOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opCopyObject == nil { + opCopyObject = &aws.Operation{ + Name: "CopyObject", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(opCopyObject, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. 
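+// The source is addressed as "sourcebucket/sourcekey" in CopySource; a
+// hedged sketch (bucket and key names are placeholders):
+//
+//	_, err := svc.CopyObject(&s3.CopyObjectInput{
+//		Bucket:     aws.String("dst-bucket"),
+//		Key:        aws.String("dst-key"),
+//		CopySource: aws.String("src-bucket/src-key"),
+//	})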
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+	req, out := c.CopyObjectRequest(input)
+	err := req.Send()
+	return out, err
+}
+func (c *S3) CopyObjectPresignedUrl(input *CopyObjectInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.CopyObjectRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opCopyObject *aws.Operation
+
+// CreateBucketRequest generates a request for the CreateBucket operation.
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *aws.Request, output *CreateBucketOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	// guard before dereferencing input below
+	if input == nil {
+		input = &CreateBucketInput{}
+	}
+
+	// The HTTP path depends on the optional ProjectId, so the operation is
+	// rebuilt per call rather than cached; a cached path would carry one
+	// request's projectId into later requests.
+	opCreateBucket = &aws.Operation{
+		Name:       "CreateBucket",
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}",
+	}
+	if input.ProjectId != nil {
+		opCreateBucket.HTTPPath = "/{Bucket}?projectId=" + *input.ProjectId
+	}
+
+	req = c.newRequest(opCreateBucket, input, output)
+	output = &CreateBucketOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new bucket.
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+	req, out := c.CreateBucketRequest(input)
+	err := req.Send()
+	return out, err
+}
+func (c *S3) CreateBucketPresignedUrl(input *CreateBucketInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.CreateBucketRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opCreateBucket *aws.Operation
+
+// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation.
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *aws.Request, output *CreateMultipartUploadOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opCreateMultipartUpload == nil {
+		opCreateMultipartUpload = &aws.Operation{
+			Name:       "CreateMultipartUpload",
+			HTTPMethod: "POST",
+			HTTPPath:   "/{Bucket}/{Key+}?uploads",
+		}
+	}
+
+	if input == nil {
+		input = &CreateMultipartUploadInput{}
+	}
+
+	req = c.newRequest(opCreateMultipartUpload, input, output)
+	output = &CreateMultipartUploadOutput{}
+	req.Data = output
+	return
+}
+
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate multipart upload and upload one or more parts,
+// you must either complete or abort multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete
+// or abort multipart upload, Amazon S3 frees up the parts storage and stops
+// charging you for the parts storage.
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+	req, out := c.CreateMultipartUploadRequest(input)
+	err := req.Send()
+	return out, err
+}
+func (c *S3) CreateMultipartUploadPresignedUrl(input *CreateMultipartUploadInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.CreateMultipartUploadRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opCreateMultipartUpload *aws.Operation
+
+// DeleteBucketRequest generates a request for the DeleteBucket operation.
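+//
+// Every operation in this file also has a *PresignedUrl variant that signs
+// the request without sending it; a hedged sketch:
+//
+//	u, err := svc.DeleteBucketPresignedUrl(&s3.DeleteBucketInput{
+//		Bucket: aws.String("my-bucket"),
+//	}, 15*time.Minute)
+//	// u is a *url.URL that stays valid for 15 minutes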
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *aws.Request, output *DeleteBucketOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucket == nil { + opDeleteBucket = &aws.Operation{ + Name: "DeleteBucket", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(opDeleteBucket, input, output) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketPresignedUrl(input *DeleteBucketInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucket *aws.Operation + +// DeleteBucketCORSRequest generates a request for the DeleteBucketCORS operation. +func (c *S3) DeleteBucketCORSRequest(input *DeleteBucketCORSInput) (req *aws.Request, output *DeleteBucketCORSOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketCORS == nil { + opDeleteBucketCORS = &aws.Operation{ + Name: "DeleteBucketCors", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + } + + if input == nil { + input = &DeleteBucketCORSInput{} + } + + req = c.newRequest(opDeleteBucketCORS, input, output) + output = &DeleteBucketCORSOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCORS(input *DeleteBucketCORSInput) (*DeleteBucketCORSOutput, error) { + req, out := c.DeleteBucketCORSRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketCORSPresignedUrl(input *DeleteBucketCORSInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketCORSRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketCORS *aws.Operation + +// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *aws.Request, output *DeleteBucketLifecycleOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketLifecycle == nil { + opDeleteBucketLifecycle = &aws.Operation{ + Name: "DeleteBucketLifecycle", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(opDeleteBucketLifecycle, input, output) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketLifecyclePresignedUrl(input *DeleteBucketLifecycleInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketLifecycleRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketLifecycle *aws.Operation + +// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. 
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *aws.Request, output *DeleteBucketPolicyOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketPolicy == nil { + opDeleteBucketPolicy = &aws.Operation{ + Name: "DeleteBucketPolicy", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(opDeleteBucketPolicy, input, output) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketPolicyPresignedUrl(input *DeleteBucketPolicyInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketPolicyRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketPolicy *aws.Operation + +// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *aws.Request, output *DeleteBucketReplicationOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketReplication == nil { + opDeleteBucketReplication = &aws.Operation{ + Name: "DeleteBucketReplication", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(opDeleteBucketReplication, input, output) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketReplicationPresignedUrl(input *DeleteBucketReplicationInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketReplicationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketReplication *aws.Operation + +// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *aws.Request, output *DeleteBucketTaggingOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketTagging == nil { + opDeleteBucketTagging = &aws.Operation{ + Name: "DeleteBucketTagging", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(opDeleteBucketTagging, input, output) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketTaggingPresignedUrl(input *DeleteBucketTaggingInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketTaggingRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketTagging *aws.Operation + +// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. 
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *aws.Request, output *DeleteBucketWebsiteOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteBucketWebsite == nil { + opDeleteBucketWebsite = &aws.Operation{ + Name: "DeleteBucketWebsite", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(opDeleteBucketWebsite, input, output) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteBucketWebsitePresignedUrl(input *DeleteBucketWebsiteInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteBucketWebsiteRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteBucketWebsite *aws.Operation + +// DeleteObjectRequest generates a request for the DeleteObject operation. +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *aws.Request, output *DeleteObjectOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteObject == nil { + opDeleteObject = &aws.Operation{ + Name: "DeleteObject", + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(opDeleteObject, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} +func (c *S3) DeleteObjectPresignedUrl(input *DeleteObjectInput, expires time.Duration) (*url.URL, error) { + req, _ := c.DeleteObjectRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opDeleteObject *aws.Operation + +// DeleteObjectsRequest generates a request for the DeleteObjects operation. +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *aws.Request, output *DeleteObjectsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opDeleteObjects == nil { + opDeleteObjects = &aws.Operation{ + Name: "DeleteObjects", + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(opDeleteObjects, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. 
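+//
+// Note that this fork implements DeleteObjects client-side as one
+// DeleteObject call per key and returns only an output, no error. A hedged
+// sketch (type names assumed from the standard generated shapes; bucket and
+// keys are illustrative):
+//
+//	out := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("my-bucket"),
+//		Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{
+//			{Key: aws.String("a")},
+//			{Key: aws.String("b")},
+//		}},
+//	})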
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) *DeleteObjectsOutput {
+	var errors []*Error
+	var okList []*DeletedObject
+	if input == nil {
+		input = &DeleteObjectsInput{}
+	}
+	for _, t := range input.Delete.Objects {
+		_, err := c.DeleteObject(&DeleteObjectInput{Bucket: input.Bucket, Key: t.Key})
+		// compare the flag's value, not its pointer; a fresh aws.Boolean(false)
+		// pointer is never pointer-equal to the input field
+		if input.IsReTurnResults != nil && !*input.IsReTurnResults {
+			if err != nil {
+				aerr, _ := err.(awserr.Error)
+				errors = append(errors, &Error{Key: t.Key, Code: aws.String(aerr.Code()), Message: aws.String(aerr.Message())})
+			} else {
+				okList = append(okList, &DeletedObject{Key: t.Key})
+			}
+		}
+	}
+	output := &DeleteObjectsOutput{
+		Deleted: okList,
+		Errors:  errors,
+	}
+	return output
+}
+
+// DeleteBucketPrefix deletes every object under the given prefix, paging
+// through ListObjects 1000 keys at a time.
+func (c *S3) DeleteBucketPrefix(input *DeleteBucketPrefixInput) (*DeleteObjectsOutput, error) {
+	var errors []*Error
+	var okList []*DeletedObject
+
+	// guard before dereferencing input below
+	if input == nil {
+		input = &DeleteBucketPrefixInput{}
+	}
+	marker := aws.String("")
+	prefix := input.Prefix
+	output := &DeleteObjectsOutput{
+		Deleted: okList,
+		Errors:  errors,
+	}
+	for {
+		resp, err := c.ListObjects(&ListObjectsInput{
+			Bucket:  input.Bucket,
+			Prefix:  prefix,
+			Marker:  marker,
+			MaxKeys: aws.Long(1000),
+		})
+		if err != nil {
+			return output, err
+		}
+		for _, t := range resp.Contents {
+			_, err := c.DeleteObject(&DeleteObjectInput{Bucket: input.Bucket, Key: t.Key})
+			if input.IsReTurnResults != nil && !*input.IsReTurnResults {
+				if err != nil {
+					aerr, _ := err.(awserr.Error)
+					errors = append(errors, &Error{Key: t.Key, Code: aws.String(aerr.Code()), Message: aws.String(aerr.Message())})
+					output.Errors = errors
+				} else {
+					okList = append(okList, &DeletedObject{Key: t.Key})
+					output.Deleted = okList
+				}
+			}
+		}
+		if !*resp.IsTruncated {
+			break
+		}
+		// continue after the last key actually returned in this page rather
+		// than assuming a full page of 1000 results
+		marker = resp.Contents[len(resp.Contents)-1].Key
+	}
+	return output, nil
+}
+
+// TryDeleteBucketPrefix retries DeleteBucketPrefix on failure.
+func (c *S3) TryDeleteBucketPrefix(input *DeleteBucketPrefixInput) (*DeleteObjectsOutput, error) {
+	params := input
+	var output *DeleteObjectsOutput
+	err := Do(func(attempt int) (bool, error) {
+		var err error
+		output, err = c.DeleteBucketPrefix(params)
+		if err != nil {
+			fmt.Println("DeleteBucketPrefix - ", err)
+		}
+		return attempt < 3, err // retry up to 3 times
+	})
+	if err != nil {
+		fmt.Println("error:", err)
+	}
+	return output, nil
+}
+
+func (c *S3) DeleteObjectsPresignedUrl(input *DeleteObjectsInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.DeleteObjectsRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opDeleteObjects *aws.Operation
+
+// GetBucketACLRequest generates a request for the GetBucketACL operation.
+func (c *S3) GetBucketACLRequest(input *GetBucketACLInput) (req *aws.Request, output *GetBucketACLOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opGetBucketACL == nil {
+		opGetBucketACL = &aws.Operation{
+			Name:       "GetBucketAcl",
+			HTTPMethod: "GET",
+			HTTPPath:   "/{Bucket}?acl",
+		}
+	}
+
+	if input == nil {
+		input = &GetBucketACLInput{}
+	}
+
+	req = c.newRequest(opGetBucketACL, input, output)
+	output = &GetBucketACLOutput{}
+	req.Data = output
+	return
+}
+
+// Gets the access control policy for the bucket.
+func (c *S3) GetBucketACL(input *GetBucketACLInput) (*GetBucketACLOutput, error) { + req, out := c.GetBucketACLRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketACLPresignedUrl(input *GetBucketACLInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketACLRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketACL *aws.Operation + +// GetBucketCORSRequest generates a request for the GetBucketCORS operation. +func (c *S3) GetBucketCORSRequest(input *GetBucketCORSInput) (req *aws.Request, output *GetBucketCORSOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketCORS == nil { + opGetBucketCORS = &aws.Operation{ + Name: "GetBucketCors", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + } + + if input == nil { + input = &GetBucketCORSInput{} + } + + req = c.newRequest(opGetBucketCORS, input, output) + output = &GetBucketCORSOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCORS(input *GetBucketCORSInput) (*GetBucketCORSOutput, error) { + req, out := c.GetBucketCORSRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketCORSPresignedUrl(input *GetBucketCORSInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketCORSRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketCORS *aws.Operation + +// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *aws.Request, output *GetBucketLifecycleOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketLifecycle == nil { + opGetBucketLifecycle = &aws.Operation{ + Name: "GetBucketLifecycle", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(opGetBucketLifecycle, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketLifecyclePresignedUrl(input *GetBucketLifecycleInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketLifecycleRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketLifecycle *aws.Operation + +// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *aws.Request, output *GetBucketLocationOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketLocation == nil { + opGetBucketLocation = &aws.Operation{ + Name: "GetBucketLocation", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(opGetBucketLocation, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. 
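+//
+// A hedged sketch (the output field name is assumed from the standard S3
+// shape, not verified against this fork):
+//
+//	loc, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil && loc.LocationConstraint != nil {
+//		region := *loc.LocationConstraint
+//		_ = region
+//	}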
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketLocationPresignedUrl(input *GetBucketLocationInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketLocationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketLocation *aws.Operation + +// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *aws.Request, output *GetBucketLoggingOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketLogging == nil { + opGetBucketLogging = &aws.Operation{ + Name: "GetBucketLogging", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(opGetBucketLogging, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketLoggingPresignedUrl(input *GetBucketLoggingInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketLoggingRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketLogging *aws.Operation + +// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfigurationDeprecated) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketNotification == nil { + opGetBucketNotification = &aws.Operation{ + Name: "GetBucketNotification", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(opGetBucketNotification, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketNotificationPresignedUrl(input *GetBucketNotificationConfigurationRequest, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketNotificationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketNotification *aws.Operation + +// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. 
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfiguration) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketNotificationConfiguration == nil { + opGetBucketNotificationConfiguration = &aws.Operation{ + Name: "GetBucketNotificationConfiguration", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(opGetBucketNotificationConfiguration, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketNotificationConfigurationPresignedUrl(input *GetBucketNotificationConfigurationRequest, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketNotificationConfigurationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketNotificationConfiguration *aws.Operation + +// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *aws.Request, output *GetBucketPolicyOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketPolicy == nil { + opGetBucketPolicy = &aws.Operation{ + Name: "GetBucketPolicy", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(opGetBucketPolicy, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketPolicyPresignedUrl(input *GetBucketPolicyInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketPolicyRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketPolicy *aws.Operation + +// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. 
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *aws.Request, output *GetBucketReplicationOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketReplication == nil { + opGetBucketReplication = &aws.Operation{ + Name: "GetBucketReplication", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(opGetBucketReplication, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketReplicationPresignedUrl(input *GetBucketReplicationInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketReplicationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketReplication *aws.Operation + +// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *aws.Request, output *GetBucketRequestPaymentOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketRequestPayment == nil { + opGetBucketRequestPayment = &aws.Operation{ + Name: "GetBucketRequestPayment", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(opGetBucketRequestPayment, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketRequestPaymentPresignedUrl(input *GetBucketRequestPaymentInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketRequestPaymentRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketRequestPayment *aws.Operation + +// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *aws.Request, output *GetBucketTaggingOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketTagging == nil { + opGetBucketTagging = &aws.Operation{ + Name: "GetBucketTagging", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(opGetBucketTagging, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketTaggingPresignedUrl(input *GetBucketTaggingInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketTaggingRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketTagging *aws.Operation + +// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. 
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *aws.Request, output *GetBucketVersioningOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketVersioning == nil { + opGetBucketVersioning = &aws.Operation{ + Name: "GetBucketVersioning", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(opGetBucketVersioning, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketVersioningPresignedUrl(input *GetBucketVersioningInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketVersioningRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketVersioning *aws.Operation + +// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *aws.Request, output *GetBucketWebsiteOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetBucketWebsite == nil { + opGetBucketWebsite = &aws.Operation{ + Name: "GetBucketWebsite", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(opGetBucketWebsite, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetBucketWebsitePresignedUrl(input *GetBucketWebsiteInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetBucketWebsiteRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetBucketWebsite *aws.Operation + +// GetObjectRequest generates a request for the GetObject operation. +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *aws.Request, output *GetObjectOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetObject == nil { + opGetObject = &aws.Operation{ + Name: "GetObject", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(opGetObject, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetObjectPresignedUrl(input *GetObjectInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetObjectRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetObject *aws.Operation + +// GetObjectACLRequest generates a request for the GetObjectACL operation. 
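+//
+// Sketch of reading an object ACL; svc, bucket, and key are assumed:
+//
+//	bucket, key := "example-bucket", "example-key"
+//	out, err := svc.GetObjectACL(&s3.GetObjectACLInput{Bucket: &bucket, Key: &key})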
+func (c *S3) GetObjectACLRequest(input *GetObjectACLInput) (req *aws.Request, output *GetObjectACLOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetObjectACL == nil { + opGetObjectACL = &aws.Operation{ + Name: "GetObjectAcl", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + } + + if input == nil { + input = &GetObjectACLInput{} + } + + req = c.newRequest(opGetObjectACL, input, output) + output = &GetObjectACLOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectACL(input *GetObjectACLInput) (*GetObjectACLOutput, error) { + req, out := c.GetObjectACLRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetObjectACLPresignedUrl(input *GetObjectACLInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetObjectACLRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetObjectACL *aws.Operation + +// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *aws.Request, output *GetObjectTorrentOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opGetObjectTorrent == nil { + opGetObjectTorrent = &aws.Operation{ + Name: "GetObjectTorrent", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(opGetObjectTorrent, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} +func (c *S3) GetObjectTorrentPresignedUrl(input *GetObjectTorrentInput, expires time.Duration) (*url.URL, error) { + req, _ := c.GetObjectTorrentRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opGetObjectTorrent *aws.Operation + +// HeadBucketRequest generates a request for the HeadBucket operation. +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *aws.Request, output *HeadBucketOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opHeadBucket == nil { + opHeadBucket = &aws.Operation{ + Name: "HeadBucket", + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(opHeadBucket, input, output) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} +func (c *S3) HeadBucketPresignedUrl(input *HeadBucketInput, expires time.Duration) (*url.URL, error) { + req, _ := c.HeadBucketRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opHeadBucket *aws.Operation + +// HeadObjectRequest generates a request for the HeadObject operation. 
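+//
+// Metadata/existence-check sketch (svc, bucket, and key are assumed):
+//
+//	bucket, key := "example-bucket", "example-key"
+//	out, err := svc.HeadObject(&s3.HeadObjectInput{Bucket: &bucket, Key: &key})
+//	// a non-nil err typically means the object is absent or access was denied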
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *aws.Request, output *HeadObjectOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opHeadObject == nil { + opHeadObject = &aws.Operation{ + Name: "HeadObject", + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(opHeadObject, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} +func (c *S3) HeadObjectPresignedUrl(input *HeadObjectInput, expires time.Duration) (*url.URL, error) { + req, _ := c.HeadObjectRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opHeadObject *aws.Operation + +// ListBucketsRequest generates a request for the ListBuckets operation. +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *aws.Request, output *ListBucketsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opListBuckets == nil { + opListBuckets = &aws.Operation{ + Name: "ListBuckets", + HTTPMethod: "GET", + HTTPPath: "/", + } + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(opListBuckets, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} +func (c *S3) ListBucketsPresignedUrl(input *ListBucketsInput, expires time.Duration) (*url.URL, error) { + req, _ := c.ListBucketsRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opListBuckets *aws.Operation + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *aws.Request, output *ListMultipartUploadsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opListMultipartUploads == nil { + opListMultipartUploads = &aws.Operation{ + Name: "ListMultipartUploads", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &aws.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(opListMultipartUploads, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. 
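+//
+// Paging sketch using ListMultipartUploadsPages (svc and bucket assumed):
+//
+//	bucket := "example-bucket"
+//	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{Bucket: &bucket},
+//		func(p *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//			return true // keep paging until IsTruncated reports false
+//		})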
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} +func (c *S3) ListMultipartUploadsPresignedUrl(input *ListMultipartUploadsInput, expires time.Duration) (*url.URL, error) { + req, _ := c.ListMultipartUploadsRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +var opListMultipartUploads *aws.Operation + +// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *aws.Request, output *ListObjectVersionsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opListObjectVersions == nil { + opListObjectVersions = &aws.Operation{ + Name: "ListObjectVersions", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &aws.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(opListObjectVersions, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} +func (c *S3) ListObjectVersionsPresignedUrl(input *ListObjectVersionsInput, expires time.Duration) (*url.URL, error) { + req, _ := c.ListObjectVersionsRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +var opListObjectVersions *aws.Operation + +// ListObjectsRequest generates a request for the ListObjects operation. +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *aws.Request, output *ListObjectsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opListObjects == nil { + opListObjects = &aws.Operation{ + Name: "ListObjects", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(opListObjects, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. 
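+//
+// Paging sketch; svc and bucket are assumed, and Contents/Key follow the
+// usual ListObjectsOutput shape defined elsewhere in this file:
+//
+//	bucket := "example-bucket"
+//	err := svc.ListObjectsPages(&s3.ListObjectsInput{Bucket: &bucket},
+//		func(p *s3.ListObjectsOutput, lastPage bool) bool {
+//			for _, o := range p.Contents {
+//				fmt.Println(*o.Key)
+//			}
+//			return true
+//		})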
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} +func (c *S3) ListObjectsPresignedUrl(input *ListObjectsInput, expires time.Duration) (*url.URL, error) { + req, _ := c.ListObjectsRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +var opListObjects *aws.Operation + +// ListPartsRequest generates a request for the ListParts operation. +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *aws.Request, output *ListPartsOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opListParts == nil { + opListParts = &aws.Operation{ + Name: "ListParts", + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &aws.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(opListParts, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} +func (c *S3) ListPartsPresignedUrl(input *ListPartsInput, expires time.Duration) (*url.URL, error) { + req, _ := c.ListPartsRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +var opListParts *aws.Operation + +// PutBucketACLRequest generates a request for the PutBucketACL operation. +func (c *S3) PutBucketACLRequest(input *PutBucketACLInput) (req *aws.Request, output *PutBucketACLOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketACL == nil { + opPutBucketACL = &aws.Operation{ + Name: "PutBucketAcl", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + } + + if input == nil { + input = &PutBucketACLInput{} + } + + req = c.newRequest(opPutBucketACL, input, output) + output = &PutBucketACLOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketACL(input *PutBucketACLInput) (*PutBucketACLOutput, error) { + req, out := c.PutBucketACLRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketACLPresignedUrl(input *PutBucketACLInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketACLRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketACL *aws.Operation + +// PutBucketCORSRequest generates a request for the PutBucketCORS operation. 
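+//
+// Presigned-URL sketch; svc and a caller-prepared input are assumed:
+//
+//	u, err := svc.PutBucketCORSPresignedUrl(input, 15*time.Minute)
+//	// u may then be used to PUT the CORS XML without SDK credentials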
+func (c *S3) PutBucketCORSRequest(input *PutBucketCORSInput) (req *aws.Request, output *PutBucketCORSOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketCORS == nil {
+        opPutBucketCORS = &aws.Operation{
+            Name:       "PutBucketCors",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?cors",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketCORSInput{}
+    }
+
+    req = c.newRequest(opPutBucketCORS, input, output)
+    output = &PutBucketCORSOutput{}
+    req.Data = output
+    return
+}
+
+// Sets the CORS configuration for a bucket.
+func (c *S3) PutBucketCORS(input *PutBucketCORSInput) (*PutBucketCORSOutput, error) {
+    req, out := c.PutBucketCORSRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutBucketCORSPresignedUrl(input *PutBucketCORSInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutBucketCORSRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutBucketCORS *aws.Operation
+
+// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation.
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *aws.Request, output *PutBucketLifecycleOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketLifecycle == nil {
+        opPutBucketLifecycle = &aws.Operation{
+            Name:       "PutBucketLifecycle",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?lifecycle",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketLifecycleInput{}
+    }
+
+    req = c.newRequest(opPutBucketLifecycle, input, output)
+    output = &PutBucketLifecycleOutput{}
+    req.Data = output
+    return
+}
+
+// Sets lifecycle configuration for your bucket. If a lifecycle configuration
+// already exists, it is replaced.
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+    req, out := c.PutBucketLifecycleRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutBucketLifecyclePresignedUrl(input *PutBucketLifecycleInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutBucketLifecycleRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutBucketLifecycle *aws.Operation
+
+// PutBucketLoggingRequest generates a request for the PutBucketLogging operation.
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *aws.Request, output *PutBucketLoggingOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketLogging == nil {
+        opPutBucketLogging = &aws.Operation{
+            Name:       "PutBucketLogging",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?logging",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketLoggingInput{}
+    }
+
+    req = c.newRequest(opPutBucketLogging, input, output)
+    output = &PutBucketLoggingOutput{}
+    req.Data = output
+    return
+}
+
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify them. To set the logging status of a bucket, you must
+// be the bucket owner.
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+    req, out := c.PutBucketLoggingRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutBucketLoggingPresignedUrl(input *PutBucketLoggingInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutBucketLoggingRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutBucketLogging *aws.Operation
+
+// PutBucketNotificationRequest generates a request for the PutBucketNotification operation.
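+//
+// The wrapper below is deprecated in favor of PutBucketNotificationConfiguration,
+// but the request pattern is the same (svc and input are assumed):
+//
+//	req, out := svc.PutBucketNotificationRequest(input)
+//	err := req.Send()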
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *aws.Request, output *PutBucketNotificationOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketNotification == nil {
+        opPutBucketNotification = &aws.Operation{
+            Name:       "PutBucketNotification",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?notification",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketNotificationInput{}
+    }
+
+    req = c.newRequest(opPutBucketNotification, input, output)
+    output = &PutBucketNotificationOutput{}
+    req.Data = output
+    return
+}
+
+// Deprecated; see the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+    req, out := c.PutBucketNotificationRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutBucketNotificationPresignedUrl(input *PutBucketNotificationInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutBucketNotificationRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutBucketNotification *aws.Operation
+
+// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *aws.Request, output *PutBucketNotificationConfigurationOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketNotificationConfiguration == nil {
+        opPutBucketNotificationConfiguration = &aws.Operation{
+            Name:       "PutBucketNotificationConfiguration",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?notification",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketNotificationConfigurationInput{}
+    }
+
+    req = c.newRequest(opPutBucketNotificationConfiguration, input, output)
+    output = &PutBucketNotificationConfigurationOutput{}
+    req.Data = output
+    return
+}
+
+// Enables notifications of specified events for a bucket.
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+    req, out := c.PutBucketNotificationConfigurationRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutBucketNotificationConfigurationPresignedUrl(input *PutBucketNotificationConfigurationInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutBucketNotificationConfigurationRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutBucketNotificationConfiguration *aws.Operation
+
+// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation.
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *aws.Request, output *PutBucketPolicyOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutBucketPolicy == nil {
+        opPutBucketPolicy = &aws.Operation{
+            Name:       "PutBucketPolicy",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}?policy",
+        }
+    }
+
+    if input == nil {
+        input = &PutBucketPolicyInput{}
+    }
+
+    req = c.newRequest(opPutBucketPolicy, input, output)
+    output = &PutBucketPolicyOutput{}
+    req.Data = output
+    return
+}
+
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
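+//
+// Sketch; svc is assumed, and the Policy field name follows the conventional
+// PutBucketPolicyInput shape, which is not shown in this hunk:
+//
+//	bucket := "example-bucket"
+//	policy := `{"Version":"2012-10-17","Statement":[]}` // illustrative document
+//	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{Bucket: &bucket, Policy: &policy})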
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketPolicyPresignedUrl(input *PutBucketPolicyInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketPolicyRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketPolicy *aws.Operation + +// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *aws.Request, output *PutBucketReplicationOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketReplication == nil { + opPutBucketReplication = &aws.Operation{ + Name: "PutBucketReplication", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(opPutBucketReplication, input, output) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketReplicationPresignedUrl(input *PutBucketReplicationInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketReplicationRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketReplication *aws.Operation + +// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *aws.Request, output *PutBucketRequestPaymentOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketRequestPayment == nil { + opPutBucketRequestPayment = &aws.Operation{ + Name: "PutBucketRequestPayment", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(opPutBucketRequestPayment, input, output) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketRequestPaymentPresignedUrl(input *PutBucketRequestPaymentInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketRequestPaymentRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketRequestPayment *aws.Operation + +// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. 
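+//
+// Presigned-URL sketch (svc and a caller-prepared input are assumed):
+//
+//	u, err := svc.PutBucketTaggingPresignedUrl(input, 10*time.Minute)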
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *aws.Request, output *PutBucketTaggingOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketTagging == nil { + opPutBucketTagging = &aws.Operation{ + Name: "PutBucketTagging", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(opPutBucketTagging, input, output) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketTaggingPresignedUrl(input *PutBucketTaggingInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketTaggingRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketTagging *aws.Operation + +// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *aws.Request, output *PutBucketVersioningOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketVersioning == nil { + opPutBucketVersioning = &aws.Operation{ + Name: "PutBucketVersioning", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(opPutBucketVersioning, input, output) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketVersioningPresignedUrl(input *PutBucketVersioningInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketVersioningRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketVersioning *aws.Operation + +// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *aws.Request, output *PutBucketWebsiteOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opPutBucketWebsite == nil { + opPutBucketWebsite = &aws.Operation{ + Name: "PutBucketWebsite", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(opPutBucketWebsite, input, output) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Set the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} +func (c *S3) PutBucketWebsitePresignedUrl(input *PutBucketWebsiteInput, expires time.Duration) (*url.URL, error) { + req, _ := c.PutBucketWebsiteRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opPutBucketWebsite *aws.Operation + +// PutObjectRequest generates a request for the PutObject operation. 
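+//
+// Presigned-upload sketch; svc is assumed, and Bucket/Key follow the usual
+// PutObjectInput shape:
+//
+//	bucket, key := "example-bucket", "example-key"
+//	u, err := svc.PutObjectPresignedUrl(&s3.PutObjectInput{Bucket: &bucket, Key: &key}, 15*time.Minute)
+//	// an HTTP PUT of the object body to u succeeds until the URL expires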
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *aws.Request, output *PutObjectOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutObject == nil {
+        opPutObject = &aws.Operation{
+            Name:       "PutObject",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}/{Key+}",
+        }
+    }
+
+    if input == nil {
+        input = &PutObjectInput{}
+    }
+
+    req = c.newRequest(opPutObject, input, output)
+    output = &PutObjectOutput{}
+    req.Data = output
+    return
+}
+
+// PutReader builds a PutObject request from a PutReaderRequest input and
+// sends it, so an object can be written from a streaming reader.
+func (c *S3) PutReader(input *PutReaderRequest) (*PutObjectOutput, error) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutObject == nil {
+        opPutObject = &aws.Operation{
+            Name:       "PutObject",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}/{Key+}",
+        }
+    }
+
+    if input == nil {
+        input = &PutReaderRequest{}
+    }
+    out := &PutObjectOutput{}
+    req := c.newRequest(opPutObject, input, out)
+    req.Data = out
+    err := req.Send()
+    return out, err
+}
+
+// Adds an object to a bucket.
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+    req, out := c.PutObjectRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+func (c *S3) PutObjectPresignedUrl(input *PutObjectInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutObjectRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+// PutObjectPreassignedInput signs a PutObject request and returns the
+// underlying *http.Request.
+func (c *S3) PutObjectPreassignedInput(input *PutObjectInput) (*http.Request, error) {
+    req, _ := c.PutObjectRequest(input)
+    err := req.Sign()
+    return req.HTTPRequest, err
+}
+
+var opPutObject *aws.Operation
+
+// PutObjectACLRequest generates a request for the PutObjectACL operation.
+func (c *S3) PutObjectACLRequest(input *PutObjectACLInput) (req *aws.Request, output *PutObjectACLOutput) {
+    oprw.Lock()
+    defer oprw.Unlock()
+
+    if opPutObjectACL == nil {
+        opPutObjectACL = &aws.Operation{
+            Name:       "PutObjectAcl",
+            HTTPMethod: "PUT",
+            HTTPPath:   "/{Bucket}/{Key+}?acl",
+        }
+    }
+
+    if input == nil {
+        input = &PutObjectACLInput{}
+    }
+
+    req = c.newRequest(opPutObjectACL, input, output)
+    output = &PutObjectACLOutput{}
+    req.Data = output
+    return
+}
+
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+func (c *S3) PutObjectACL(input *PutObjectACLInput) (*PutObjectACLOutput, error) {
+    req, out := c.PutObjectACLRequest(input)
+    err := req.Send()
+    return out, err
+}
+func (c *S3) PutObjectACLPresignedUrl(input *PutObjectACLInput, expires time.Duration) (*url.URL, error) {
+    req, _ := c.PutObjectACLRequest(input)
+    req.ExpireTime = expires
+    err := req.Sign()
+    return req.HTTPRequest.URL, err
+}
+
+var opPutObjectACL *aws.Operation
+
+// RestoreObjectRequest generates a request for the RestoreObject operation.
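+//
+// Request-style sketch; no RestoreObject convenience wrapper is defined in
+// this file, so the request is sent directly (svc and input are assumed):
+//
+//	req, out := svc.RestoreObjectRequest(input)
+//	err := req.Send() // POST /{Bucket}/{Key+}?restore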
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *aws.Request, output *RestoreObjectOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opRestoreObject == nil { + opRestoreObject = &aws.Operation{ + Name: "RestoreObject", + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + } + + if input == nil { + input = &RestoreObjectInput{} + } + + req = c.newRequest(opRestoreObject, input, output) + output = &RestoreObjectOutput{} + req.Data = output + return +} + +func (c *S3) RestoreObjectPresignedUrl(input *RestoreObjectInput, expires time.Duration) (*url.URL, error) { + req, _ := c.RestoreObjectRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opRestoreObject *aws.Operation + +// UploadPartRequest generates a request for the UploadPart operation. +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *aws.Request, output *UploadPartOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opUploadPart == nil { + opUploadPart = &aws.Operation{ + Name: "UploadPart", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &UploadPartInput{} + } + + req = c.newRequest(opUploadPart, input, output) + output = &UploadPartOutput{} + req.Data = output + return +} + +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, +// you must either complete or abort multipart upload in order to stop getting +// charged for storage of the uploaded parts. Only after you either complete +// or abort multipart upload, Amazon S3 frees up the parts storage and stops +// charging you for the parts storage. +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + err := req.Send() + return out, err +} +func (c *S3) UploadPartPresignedUrl(input *UploadPartInput, expires time.Duration) (*url.URL, error) { + req, _ := c.UploadPartRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opUploadPart *aws.Operation + +// UploadPartCopyRequest generates a request for the UploadPartCopy operation. +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *aws.Request, output *UploadPartCopyOutput) { + oprw.Lock() + defer oprw.Unlock() + + if opUploadPartCopy == nil { + opUploadPartCopy = &aws.Operation{ + Name: "UploadPartCopy", + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + req = c.newRequest(opUploadPartCopy, input, output) + output = &UploadPartCopyOutput{} + req.Data = output + return +} + +// Uploads a part by copying data from an existing object as data source. +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + err := req.Send() + return out, err +} +func (c *S3) UploadPartCopyPresignedUrl(input *UploadPartCopyInput, expires time.Duration) (*url.URL, error) { + req, _ := c.UploadPartCopyRequest(input) + req.ExpireTime = expires + err := req.Sign() + return req.HTTPRequest.URL, err +} + +var opUploadPartCopy *aws.Operation + +type AbortMultipartUploadInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. 
Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataAbortMultipartUploadInput `json:"-" xml:"-"` +} + +type metadataAbortMultipartUploadInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type AbortMultipartUploadOutput struct { + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + metadataAbortMultipartUploadOutput `json:"-" xml:"-"` +} + +type metadataAbortMultipartUploadOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type AccessControlPolicy struct { + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + metadataAccessControlPolicy `json:"-" xml:"-"` +} + +type metadataAccessControlPolicy struct { + SDKShapeTraits bool `type:"structure"` +} + +type Bucket struct { + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` + + Region *string `type:"string"` + + metadataBucket `json:"-" xml:"-"` +} + +type metadataBucket struct { + SDKShapeTraits bool `type:"structure"` +} + +type BucketLoggingStatus struct { + LoggingEnabled *LoggingEnabled `type:"structure"` + + metadataBucketLoggingStatus `json:"-" xml:"-"` +} + +type metadataBucketLoggingStatus struct { + SDKShapeTraits bool `type:"structure"` +} + +type CORSConfiguration struct { + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` + + metadataCORSConfiguration `json:"-" xml:"-"` +} + +type metadataCORSConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +type CORSRule struct { + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` + + metadataCORSRule `json:"-" xml:"-"` +} + +type metadataCORSRule struct { + SDKShapeTraits bool `type:"structure"` +} + +type CloudFunctionConfiguration struct { + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. 
+ Event *string `type:"string"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + ID *string `locationName:"Id" type:"string"` + + InvocationRole *string `type:"string"` + + metadataCloudFunctionConfiguration `json:"-" xml:"-"` +} + +type metadataCloudFunctionConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +type CommonPrefix struct { + Prefix *string `type:"string"` + + metadataCommonPrefix `json:"-" xml:"-"` +} + +type metadataCommonPrefix struct { + SDKShapeTraits bool `type:"structure"` +} + +type CompleteMultipartUploadInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataCompleteMultipartUploadInput `json:"-" xml:"-"` +} + +type metadataCompleteMultipartUploadInput struct { + SDKShapeTraits bool `type:"structure" payload:"MultipartUpload"` +} + +type CompleteMultipartUploadOutput struct { + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // Version of the object. 
+    VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+    metadataCompleteMultipartUploadOutput `json:"-" xml:"-"`
+}
+
+type metadataCompleteMultipartUploadOutput struct {
+    SDKShapeTraits bool `type:"structure"`
+}
+
+type CompletedMultipartUpload struct {
+    Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+
+    metadataCompletedMultipartUpload `json:"-" xml:"-"`
+}
+
+type metadataCompletedMultipartUpload struct {
+    SDKShapeTraits bool `type:"structure"`
+}
+
+type CompletedPart struct {
+    // Entity tag returned when the part was uploaded.
+    ETag *string `type:"string"`
+
+    // Part number that identifies the part.
+    PartNumber *int64 `type:"integer"`
+
+    metadataCompletedPart `json:"-" xml:"-"`
+}
+
+type metadataCompletedPart struct {
+    SDKShapeTraits bool `type:"structure"`
+}
+
+type Condition struct {
+    // The HTTP error code when the redirect is applied. In the event of an error,
+    // if the error code equals this value, then the specified redirect is applied.
+    // Required when parent element Condition is specified and sibling KeyPrefixEquals
+    // is not specified. If both are specified, then both must be true for the redirect
+    // to be applied.
+    HTTPErrorCodeReturnedEquals *string `locationName:"HttpErrorCodeReturnedEquals" type:"string"`
+
+    // The object key name prefix when the redirect is applied. For example, to
+    // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+    // To redirect requests for all pages with the prefix docs/, the key prefix will
+    // be docs/, which identifies all objects in the docs/ folder. Required when
+    // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+    // is not specified. If both conditions are specified, both must be true for
+    // the redirect to be applied.
+    KeyPrefixEquals *string `type:"string"`
+
+    metadataCondition `json:"-" xml:"-"`
+}
+
+type metadataCondition struct {
+    SDKShapeTraits bool `type:"structure"`
+}
+
+type CopyObjectInput struct {
+    // The canned ACL to apply to the object.
+    ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+    // Specifies caching behavior along the request/reply chain.
+    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+    // Specifies presentational information for the object.
+    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+    // Specifies what content encodings have been applied to the object and thus
+    // what decoding mechanisms must be applied to obtain the media-type referenced
+    // by the Content-Type header field.
+    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+    // The language the content is in.
+    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+    // A standard MIME type describing the format of the object data.
+    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+    // The name of the source bucket and key name of the source object, separated
+    // by a slash (/). Must be URL-encoded.
+    CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+    // Copies the object if its entity tag (ETag) matches the specified tag.
+    CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+    // Copies the object if it has been modified since the specified time.
+    CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+    // Copies the object if its entity tag (ETag) is different than the specified
+    // ETag.
+    CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+    // Copies the object if it hasn't been modified since the specified time.
+    CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+    // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+    CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+    // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+    // the source object. The encryption key provided in this header must be one
+    // that was used when the source object was created.
+    CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+    // Amazon S3 uses this header for a message integrity check to ensure the encryption
+    // key was transmitted without error.
+    CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+    // The date and time at which the object is no longer cacheable.
+    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+    // Allows grantee to read the object data and its metadata.
+    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+    // Allows grantee to read the object ACL.
+    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+    // Allows grantee to write the ACL for the applicable object.
+    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+    Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+    // A map of metadata to store with the object in S3.
+    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+    // Specifies whether the metadata is copied from the source object or replaced
+    // with metadata provided in the request.
+    MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string"`
+
+    // Confirms that the requester knows that she or he will be charged for the
+    // request. Bucket owners need not specify this parameter in their requests.
+    // Documentation on downloading objects from requester pays buckets can be found
+    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+    // Specifies the algorithm to use when encrypting the object (e.g., AES256,
+    // aws:kms).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + XAmzTaggingDirective *string `location:"header" locationName:"x-kss-tagging-Directive" type:"string"` + + XAmzTagging *string `location:"header" locationName:"x-kss-tagging" type:"string"` + + metadataCopyObjectInput `json:"-" xml:"-"` +} + +type metadataCopyObjectInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type CopyObjectOutput struct { + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + metadataCopyObjectOutput `json:"-" xml:"-"` +} + +type metadataCopyObjectOutput struct { + SDKShapeTraits bool `type:"structure" payload:"CopyObjectResult"` +} + +type CopyObjectResult struct { + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataCopyObjectResult `json:"-" xml:"-"` +} + +type metadataCopyObjectResult struct { + SDKShapeTraits bool `type:"structure"` +} + +type CopyPartResult struct { + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataCopyPartResult `json:"-" xml:"-"` +} + +type metadataCopyPartResult struct { + SDKShapeTraits bool `type:"structure"` +} + +type CreateBucketConfiguration struct { + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string"` + + metadataCreateBucketConfiguration `json:"-" xml:"-"` +} + +type metadataCreateBucketConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +type CreateBucketInput struct { + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + ProjectId *string `location:"uri" locationName:"ProjectId" type:"string"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
+    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+    BucketType *string `location:"header" locationName:"x-amz-bucket-type" type:"string"`
+
+    MetadataCreateBucketInput `json:"-" xml:"-"`
+}
+
+type MetadataCreateBucketInput struct {
+    SDKShapeTraits bool `type:"structure" payload:"CreateBucketConfiguration"`
+}
+
+type CreateBucketOutput struct {
+    Location *string `location:"header" locationName:"Location" type:"string"`
+
+    metadataCreateBucketOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateBucketOutput struct {
+    SDKShapeTraits bool `type:"structure"`
+}
+
+type CreateMultipartUploadInput struct {
+    // The canned ACL to apply to the object.
+    ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+    XAmzTagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+    // Specifies caching behavior along the request/reply chain.
+    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+    // Specifies presentational information for the object.
+    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+    // Specifies what content encodings have been applied to the object and thus
+    // what decoding mechanisms must be applied to obtain the media-type referenced
+    // by the Content-Type header field.
+    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+    // The language the content is in.
+    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+    // A standard MIME type describing the format of the object data.
+    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+    // The date and time at which the object is no longer cacheable.
+    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+    // Allows grantee to read the object data and its metadata.
+    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+    // Allows grantee to read the object ACL.
+    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+    // Allows grantee to write the ACL for the applicable object.
+    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+    Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+    // A map of metadata to store with the object in S3.
+    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+    // Confirms that the requester knows that she or he will be charged for the
+    // request. Bucket owners need not specify this parameter in their requests.
+    // Documentation on downloading objects from requester pays buckets can be found
+    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+    // Specifies the algorithm to use when encrypting the object (e.g., AES256,
+    // aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+	metadataCreateMultipartUploadInput `json:"-" xml:"-"`
+}
+
+type metadataCreateMultipartUploadInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
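+
+// Illustrative usage sketch, not part of the generated shapes: initiating a
+// multipart upload with the input type above. The aws.String helper is this
+// SDK's pointer function; the bucket, key, and any CreateMultipartUpload
+// operation wired to these shapes are assumptions of the sketch.
+//
+//	input := &CreateMultipartUploadInput{
+//		Bucket:      aws.String("examplebucket"),
+//		Key:         aws.String("movies/night.mp4"),
+//		ContentType: aws.String("video/mp4"),
+//		ACL:         aws.String("private"),
+//	}
+//	// The returned CreateMultipartUploadOutput.UploadID identifies this
+//	// upload in subsequent part-upload and completion requests.
+
+type CreateMultipartUploadOutput struct {
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `locationName:"Bucket" type:"string"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.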
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// ID for the initiated multipart upload.
+	UploadID *string `locationName:"UploadId" type:"string"`
+
+	metadataCreateMultipartUploadOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateMultipartUploadOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type Delete struct {
+	Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+	// Element to enable quiet mode for the request. When you add this element,
+	// you must set its value to true.
+	Quiet *bool `type:"boolean"`
+
+	metadataDelete `json:"-" xml:"-"`
+}
+
+type metadataDelete struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketCORSInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketCORSInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketCORSInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketCORSOutput struct {
+	metadataDeleteBucketCORSOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketCORSOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketLifecycleInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketLifecycleInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketLifecycleInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketLifecycleOutput struct {
+	metadataDeleteBucketLifecycleOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketLifecycleOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketOutput struct {
+	metadataDeleteBucketOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketPolicyInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketPolicyInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketPolicyOutput struct {
+	metadataDeleteBucketPolicyOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketPolicyOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketReplicationInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketReplicationInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketReplicationInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketReplicationOutput struct {
+	metadataDeleteBucketReplicationOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketReplicationOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketTaggingInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketTaggingInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketTaggingOutput struct {
+	metadataDeleteBucketTaggingOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketTaggingOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketWebsiteInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteBucketWebsiteInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketWebsiteInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketWebsiteOutput struct {
+	metadataDeleteBucketWebsiteOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteBucketWebsiteOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteMarkerEntry struct {
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of an object.
+	IsLatest *bool `type:"boolean"`
+
+	// The object key.
+	Key *string `type:"string"`
+
+	// Date and time the object was last modified.
+	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	Owner *Owner `type:"structure"`
+
+	// Version ID of an object.
+	VersionID *string `locationName:"VersionId" type:"string"`
+
+	metadataDeleteMarkerEntry `json:"-" xml:"-"`
+}
+
+type metadataDeleteMarkerEntry struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteObjectInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteObjectInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteBucketPrefixInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	Prefix *string `type:"string" required:"true"`
+	IsReTurnResults *bool `type:"boolean" required:"true"`
+}
+
+type DeleteObjectOutput struct {
+	// Specifies whether the versioned object that was permanently deleted was (true)
+	// or was not (false) a delete marker.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// Returns the version ID of the delete marker created as a result of the DELETE
+	// operation.
+	VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+	metadataDeleteObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteObjectsInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+
+	IsReTurnResults *bool `type:"boolean" required:"true"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteObjectsInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectsInput struct {
+	SDKShapeTraits bool `type:"structure" payload:"Delete"`
+}
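+
+// Illustrative usage sketch, not part of the generated shapes: building the
+// payload for a batch delete. ObjectIdentifier is defined elsewhere in this
+// package; aws.String and aws.Bool are this SDK's pointer helpers; the bucket
+// and key names are placeholders.
+//
+//	input := &DeleteObjectsInput{
+//		Bucket: aws.String("examplebucket"),
+//		Delete: &Delete{
+//			Objects: []*ObjectIdentifier{
+//				{Key: aws.String("logs/2022-08-01.gz")},
+//				{Key: aws.String("logs/2022-08-02.gz")},
+//			},
+//			// Quiet suppresses per-object success entries; only errors
+//			// are reported in DeleteObjectsOutput.Errors.
+//			Quiet: aws.Bool(true),
+//		},
+//	}
+
+type DeleteObjectsOutput struct {
+	Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+	Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+	ErrorsCount *int64 `locationName:"ErrorsCount" type:"integer"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.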
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + metadataDeleteObjectsOutput `json:"-" xml:"-"` +} + +type metadataDeleteObjectsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type DeletedObject struct { + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionID *string `locationName:"DeleteMarkerVersionId" type:"string"` + + Key *string `type:"string"` + + VersionID *string `locationName:"VersionId" type:"string"` + + metadataDeletedObject `json:"-" xml:"-"` +} + +type metadataDeletedObject struct { + SDKShapeTraits bool `type:"structure"` +} + +type Destination struct { + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + metadataDestination `json:"-" xml:"-"` +} + +type metadataDestination struct { + SDKShapeTraits bool `type:"structure"` +} + +type Error struct { + Code *string `type:"string"` + + Key *string `type:"string"` + + Message *string `type:"string"` + + VersionID *string `locationName:"VersionId" type:"string"` + + metadataError `json:"-" xml:"-"` +} + +type metadataError struct { + SDKShapeTraits bool `type:"structure"` +} + +type ErrorDocument struct { + // The object key name to use when a 4XX class error occurs. + Key *string `type:"string" required:"true"` + + metadataErrorDocument `json:"-" xml:"-"` +} + +type metadataErrorDocument struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketACLInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketACLInput `json:"-" xml:"-"` +} + +type metadataGetBucketACLInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketACLOutput struct { + // A list of grants. 
+	Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+	Owner *Owner `type:"structure"`
+
+	metadataGetBucketACLOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketACLOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketCORSInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataGetBucketCORSInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketCORSInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketCORSOutput struct {
+	CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+
+	metadataGetBucketCORSOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketCORSOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLifecycleInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataGetBucketLifecycleInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLifecycleInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleFilter struct {
+	And *And `locationName:"And" type:"structure"`
+
+	metadataLifecycleFilter `json:"-" xml:"-"`
+}
+
+type metadataLifecycleFilter struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type And struct {
+	Prefix *string `type:"string" required:"true"`
+
+	Tag []*Tag `locationNameList:"Tag" type:"list" flattened:"true"`
+
+	metadataAnd `json:"-" xml:"-"`
+}
+
+type metadataAnd struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLifecycleOutput struct {
+	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+
+	metadataGetBucketLifecycleOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLifecycleOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLocationInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataGetBucketLocationInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLocationInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLocationOutput struct {
+	LocationConstraint *string `type:"string"`
+
+	metadataGetBucketLocationOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLocationOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLoggingInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataGetBucketLoggingInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLoggingInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetBucketLoggingOutput struct {
+	LoggingEnabled *LoggingEnabled `type:"structure"`
+
+	metadataGetBucketLoggingOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketLoggingOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
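+
+// Illustrative usage sketch, not part of the generated shapes: a lifecycle
+// rule that expires objects under a tagged prefix after 30 days. LifecycleRule
+// and LifecycleExpiration are defined later in this file, Tag elsewhere in
+// this package; aws.String and aws.Int64 are this SDK's pointer helpers.
+//
+//	rule := &LifecycleRule{
+//		ID:     aws.String("expire-logs"),
+//		Status: aws.String("Enabled"),
+//		Filter: &LifecycleFilter{
+//			And: &And{
+//				Prefix: aws.String("logs/"),
+//				Tag: []*Tag{
+//					{Key: aws.String("archive"), Value: aws.String("true")},
+//				},
+//			},
+//		},
+//		Expiration: &LifecycleExpiration{Days: aws.Int64(30)},
+//	}
+
+type GetBucketNotificationConfigurationRequest struct {
+	// Name of the bucket to get the notification configuration for.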
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + metadataGetBucketNotificationConfigurationRequest `json:"-" xml:"-"` +} + +type metadataGetBucketNotificationConfigurationRequest struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketPolicyInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketPolicyInput `json:"-" xml:"-"` +} + +type metadataGetBucketPolicyInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketPolicyOutput struct { + // The bucket policy as a JSON document. + Policy *string `type:"string"` + + metadataGetBucketPolicyOutput `json:"-" xml:"-"` +} + +type metadataGetBucketPolicyOutput struct { + SDKShapeTraits bool `type:"structure" payload:"Policy"` +} + +type GetBucketReplicationInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketReplicationInput `json:"-" xml:"-"` +} + +type metadataGetBucketReplicationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketReplicationOutput struct { + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` + + metadataGetBucketReplicationOutput `json:"-" xml:"-"` +} + +type metadataGetBucketReplicationOutput struct { + SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"` +} + +type GetBucketRequestPaymentInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketRequestPaymentInput `json:"-" xml:"-"` +} + +type metadataGetBucketRequestPaymentInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketRequestPaymentOutput struct { + // Specifies who pays for the download and request fees. + Payer *string `type:"string"` + + metadataGetBucketRequestPaymentOutput `json:"-" xml:"-"` +} + +type metadataGetBucketRequestPaymentOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketTaggingInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketTaggingInput `json:"-" xml:"-"` +} + +type metadataGetBucketTaggingInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketTaggingOutput struct { + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + metadataGetBucketTaggingOutput `json:"-" xml:"-"` +} + +type metadataGetBucketTaggingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketVersioningInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketVersioningInput `json:"-" xml:"-"` +} + +type metadataGetBucketVersioningInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketVersioningOutput struct { + // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
+ // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string"` + + // The versioning state of the bucket. + Status *string `type:"string"` + + metadataGetBucketVersioningOutput `json:"-" xml:"-"` +} + +type metadataGetBucketVersioningOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketWebsiteInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetBucketWebsiteInput `json:"-" xml:"-"` +} + +type metadataGetBucketWebsiteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetBucketWebsiteOutput struct { + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` + + metadataGetBucketWebsiteOutput `json:"-" xml:"-"` +} + +type metadataGetBucketWebsiteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetObjectACLInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionID *string `location:"querystring" locationName:"versionId" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetObjectACLInput `json:"-" xml:"-"` +} + +type metadataGetObjectACLInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetObjectACLOutput struct { + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + metadataGetObjectACLOutput `json:"-" xml:"-"` +} + +type metadataGetObjectACLOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetObjectInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). 
+	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+	// Return the object only if it has not been modified since the specified time,
+	// otherwise return a 412 (precondition failed).
+	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// Downloads the specified range bytes of an object. For more information about
+	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Sets the Cache-Control header of the response.
+	ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+	// Sets the Content-Disposition header of the response.
+	ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+	// Sets the Content-Encoding header of the response.
+	ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+	// Sets the Content-Language header of the response.
+	ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+	// Sets the Content-Type header of the response.
+	ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+	// Sets the Expires header of the response.
+	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	TrafficLimit *int64 `location:"header" locationName:"x-kss-traffic-limit" type:"integer"`
+
+	metadataGetObjectInput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
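+
+// Illustrative usage sketch, not part of the generated shapes: a ranged
+// download. Range uses RFC 2616 byte-range syntax; the x-kss-traffic-limit
+// header throttles the download (its unit is defined by the KS3 service
+// documentation). The bucket and key are placeholders; aws.String is this
+// SDK's pointer helper.
+//
+//	input := &GetObjectInput{
+//		Bucket: aws.String("examplebucket"),
+//		Key:    aws.String("movies/night.mp4"),
+//		Range:  aws.String("bytes=0-1048575"), // first 1 MiB
+//	}
+//	// GetObjectOutput.Body is an io.ReadCloser; the caller must Close it.
+
+type GetObjectOutput struct {
+	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+	// Object data.
+	Body io.ReadCloser `type:"blob"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+	// The portion of the object returned in the response.
+	ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// Specifies whether the object retrieved was (true) or was not (false) a Delete
+	// Marker. If false, this response header does not appear in the response.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+	// An ETag is an opaque identifier assigned by a web server to a specific version
+	// of a resource found at a URL
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured (see PUT Bucket lifecycle), the response
+	// includes this header. It includes the expiry-date and rule-id key value pairs
+	// providing object expiration information. The value of the rule-id is URL
+	// encoded.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Last modified date of the object
+	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// This is set to the number of metadata entries not returned in x-amz-meta
+	// headers. This can happen if you create metadata using an API like SOAP that
+	// supports more flexible metadata than the REST API. For example, using SOAP,
+	// you can create metadata whose values are not legal HTTP headers.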
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // Version of the object. + VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + metadataGetObjectOutput `json:"-" xml:"-"` +} + +type metadataGetObjectOutput struct { + SDKShapeTraits bool `type:"structure" payload:"Body"` +} + +type GetObjectTorrentInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataGetObjectTorrentInput `json:"-" xml:"-"` +} + +type metadataGetObjectTorrentInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type GetObjectTorrentOutput struct { + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + metadataGetObjectTorrentOutput `json:"-" xml:"-"` +} + +type metadataGetObjectTorrentOutput struct { + SDKShapeTraits bool `type:"structure" payload:"Body"` +} + +type Grant struct { + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string"` + + metadataGrant `json:"-" xml:"-"` +} + +type metadataGrant struct { + SDKShapeTraits bool `type:"structure"` +} + +type Grantee struct { + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true"` + + // URI of the grantee group. + URI *string `type:"string"` + + metadataGrantee `json:"-" xml:"-"` +} + +type metadataGrantee struct { + SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` +} + +type HeadBucketInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataHeadBucketInput `json:"-" xml:"-"` +} + +type metadataHeadBucketInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type HeadBucketOutput struct { + metadataHeadBucketOutput `json:"-" xml:"-"` +} + +type metadataHeadBucketOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type HeadObjectInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataHeadObjectInput `json:"-" xml:"-"`
+}
+
+type metadataHeadObjectInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
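+
+// Illustrative usage sketch, not part of the generated shapes: a conditional
+// HEAD request that re-fetches object metadata only when a cached ETag is
+// stale. The etag value, bucket, and key are placeholders; aws.String is this
+// SDK's pointer helper.
+//
+//	input := &HeadObjectInput{
+//		Bucket:      aws.String("examplebucket"),
+//		Key:         aws.String("movies/night.mp4"),
+//		IfNoneMatch: aws.String(`"9b2cf535f27731c974343645a3985328"`),
+//	}
+//	// A 304 (not modified) response means the cached metadata is current.
+
+type HeadObjectOutput struct {
+	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// Specifies whether the object retrieved was (true) or was not (false) a Delete
+	// Marker. If false, this response header does not appear in the response.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+	// An ETag is an opaque identifier assigned by a web server to a specific version
+	// of a resource found at a URL
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured (see PUT Bucket lifecycle), the response
+	// includes this header. It includes the expiry-date and rule-id key value pairs
+	// providing object expiration information. The value of the rule-id is URL
+	// encoded.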
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // Version of the object. + VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + metadataHeadObjectOutput `json:"-" xml:"-"` +} + +type metadataHeadObjectOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type IndexDocument struct { + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. 
+	Suffix *string `type:"string" required:"true"`
+
+	metadataIndexDocument `json:"-" xml:"-"`
+}
+
+type metadataIndexDocument struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type Initiator struct {
+	// Name of the Principal.
+	DisplayName *string `type:"string"`
+
+	// If the principal is an AWS account, it provides the Canonical User ID. If
+	// the principal is an IAM User, it provides a user ARN value.
+	ID *string `type:"string"`
+
+	metadataInitiator `json:"-" xml:"-"`
+}
+
+type metadataInitiator struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying the AWS Lambda notification configuration.
+type LambdaFunctionConfiguration struct {
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	ID *string `locationName:"Id" type:"string"`
+
+	// Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+	// of the specified type.
+	LambdaFunctionARN *string `locationName:"CloudFunction" type:"string" required:"true"`
+
+	metadataLambdaFunctionConfiguration `json:"-" xml:"-"`
+}
+
+type metadataLambdaFunctionConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleConfiguration struct {
+	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+
+	metadataLifecycleConfiguration `json:"-" xml:"-"`
+}
+
+type metadataLifecycleConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleExpiration struct {
+	// Indicates at what date the object is to be moved or deleted. Should be in
+	// GMT ISO 8601 Format.
+	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Indicates the lifetime, in days, of the objects that are subject to the rule.
+	// The value must be a non-zero positive integer.
+	Days *int64 `type:"integer"`
+
+	metadataLifecycleExpiration `json:"-" xml:"-"`
+}
+
+type metadataLifecycleExpiration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type LifecycleRule struct {
+	Expiration *LifecycleExpiration `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+	// configuration action on a bucket that has versioning enabled (or suspended)
+	// to request that Amazon S3 delete noncurrent object versions at a specific
+	// period in the object's lifetime.
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+	// Container for the transition rule that describes when noncurrent objects
+	// transition to the GLACIER storage class. If your bucket is versioning-enabled
+	// (or versioning is suspended), you can set this action to request that Amazon
+	// S3 transition noncurrent object versions to the GLACIER storage class at
+	// a specific period in the object's lifetime.
+	NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+	// The filter identifying one or more objects to which the rule applies.
+	Filter *LifecycleFilter `type:"structure"`
+
+	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+	// is not currently being applied.
+ Status *string `type:"string" required:"true"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` + + metadataLifecycleRule `json:"-" xml:"-"` +} + +type metadataLifecycleRule struct { + SDKShapeTraits bool `type:"structure"` +} + +type ListBucketsInput struct { + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + metadataListBucketsInput `json:"-" xml:"-"` +} + +type metadataListBucketsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type ListBucketsOutput struct { + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` + + metadataListBucketsOutput `json:"-" xml:"-"` +} + +type metadataListBucketsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type ListMultipartUploadsInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIDMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataListMultipartUploadsInput `json:"-" xml:"-"` +} + +type metadataListMultipartUploadsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type ListMultipartUploadsOutput struct { + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. 
+	KeyMarker *string `type:"string"`
+
+	// Maximum number of multipart uploads that could have been included in the
+	// response.
+	MaxUploads *int64 `type:"integer"`
+
+	// When a list is truncated, this element specifies the value that should be
+	// used for the key-marker request parameter in a subsequent request.
+	NextKeyMarker *string `type:"string"`
+
+	// When a list is truncated, this element specifies the value that should be
+	// used for the upload-id-marker request parameter in a subsequent request.
+	NextUploadIDMarker *string `locationName:"NextUploadIdMarker" type:"string"`
+
+	// When a prefix is provided in the request, this field contains the specified
+	// prefix. The result contains only keys starting with the specified prefix.
+	Prefix *string `type:"string"`
+
+	// Upload ID after which listing began.
+	UploadIDMarker *string `locationName:"UploadIdMarker" type:"string"`
+
+	Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+
+	metadataListMultipartUploadsOutput `json:"-" xml:"-"`
+}
+
+type metadataListMultipartUploadsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectVersionsInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key may contain any Unicode character;
+	// however, XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"`
+
+	// Specifies the key to start with when listing objects in a bucket.
+	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Specifies the object version you want to start listing from.
+	VersionIDMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataListObjectVersionsInput `json:"-" xml:"-"`
+}
+
+type metadataListObjectVersionsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectVersionsOutput struct {
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+	DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria. If your results were truncated, you can
+	// make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+	// response parameters as a starting place in another request to return the
+	// rest of the results.
+	IsTruncated *bool `type:"boolean"`
+
+	// Marks the last Key returned in a truncated response.
+	KeyMarker *string `type:"string"`
+
+	MaxKeys *int64 `type:"integer"`
+
+	Name *string `type:"string"`
+
+	// Use this value for the key marker request parameter in a subsequent request.
+	NextKeyMarker *string `type:"string"`
+
+	// Use this value for the next version id marker parameter in a subsequent request.
+	NextVersionIDMarker *string `locationName:"NextVersionIdMarker" type:"string"`
+
+	Prefix *string `type:"string"`
+
+	VersionIDMarker *string `locationName:"VersionIdMarker" type:"string"`
+
+	Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+
+	metadataListObjectVersionsOutput `json:"-" xml:"-"`
+}
+
+type metadataListObjectVersionsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type ListObjectsInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key may contain any Unicode character;
+	// however, XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string"`
+
+	// Specifies the key to start with when listing objects in a bucket.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataListObjectsInput `json:"-" xml:"-"`
+}
+
+type metadataListObjectsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
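+
+// Illustrative usage sketch, not part of the generated shapes: paging through
+// a bucket with Marker/NextMarker. The svc variable stands for this package's
+// client and ListObjects for the operation generated from these shapes; both,
+// like the bucket and prefix, are assumptions of the sketch.
+//
+//	input := &ListObjectsInput{
+//		Bucket: aws.String("examplebucket"),
+//		Prefix: aws.String("logs/"),
+//	}
+//	for {
+//		resp, err := svc.ListObjects(input)
+//		if err != nil {
+//			break
+//		}
+//		for _, obj := range resp.Contents {
+//			_ = obj // process each *Object
+//		}
+//		if resp.IsTruncated == nil || !*resp.IsTruncated {
+//			break
+//		}
+//		// NextMarker is only returned when a delimiter is set; otherwise
+//		// fall back to the Key of the last object in resp.Contents.
+//		input.Marker = resp.NextMarker
+//	}
+
+type ListObjectsOutput struct {
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+	Contents []*Object `type:"list" flattened:"true"`
+
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated *bool `type:"boolean"`
+
+	Marker *string `type:"string"`
+
+	MaxKeys *int64 `type:"integer"`
+
+	Name *string `type:"string"`
+
+	// When response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as marker in the subsequent
+	// request to get next set of objects. Amazon S3 lists objects in alphabetical
+	// order. Note: This element is returned only if you have delimiter request parameter
+	// specified. If response does not include the NextMarker and it is truncated,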
+type ListPartsInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// Sets the maximum number of parts to return.
+	MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+	// Specifies the part after which listing should begin. Only parts with higher
+	// part numbers will be listed.
+	PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Upload ID identifying the multipart upload whose parts are being listed.
+	UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataListPartsInput `json:"-" xml:"-"`
+}
+
+type metadataListPartsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type ListPartsOutput struct {
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `type:"string"`
+
+	// Identifies who initiated the multipart upload.
+	Initiator *Initiator `type:"structure"`
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated *bool `type:"boolean"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `type:"string"`
+
+	// Maximum number of parts that were allowed in the response.
+	MaxParts *int64 `type:"integer"`
+
+	// When a list is truncated, this element specifies the last part in the list,
+	// as well as the value to use for the part-number-marker request parameter
+	// in a subsequent request.
+	NextPartNumberMarker *int64 `type:"integer"`
+
+	Owner *Owner `type:"structure"`
+
+	// Part number after which listing begins.
+	PartNumberMarker *int64 `type:"integer"`
+
+	Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// The class of storage used to store the object.
+	StorageClass *string `type:"string"`
+
+	// Upload ID identifying the multipart upload whose parts are being listed.
+	UploadID *string `locationName:"UploadId" type:"string"`
+
+	metadataListPartsOutput `json:"-" xml:"-"`
+}
+
+type metadataListPartsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type LoggingEnabled struct {
+	// Specifies the bucket where you want Amazon S3 to store server access logs.
+	// You can have your logs delivered to any bucket that you own, including the
+	// same bucket that is being logged.
You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` + + metadataLoggingEnabled `json:"-" xml:"-"` +} + +type metadataLoggingEnabled struct { + SDKShapeTraits bool `type:"structure"` +} + +type MultipartUpload struct { + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string"` + + // Upload ID that identifies the multipart upload. + UploadID *string `locationName:"UploadId" type:"string"` + + metadataMultipartUpload `json:"-" xml:"-"` +} + +type metadataMultipartUpload struct { + SDKShapeTraits bool `type:"structure"` +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + metadataNoncurrentVersionExpiration `json:"-" xml:"-"` +} + +type metadataNoncurrentVersionExpiration struct { + SDKShapeTraits bool `type:"structure"` +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the GLACIER storage class. If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon +// S3 transition noncurrent object versions to the GLACIER storage class at +// a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string"` + + metadataNoncurrentVersionTransition `json:"-" xml:"-"` +} + +type metadataNoncurrentVersionTransition struct { + SDKShapeTraits bool `type:"structure"` +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+type NotificationConfiguration struct { + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` + + metadataNotificationConfiguration `json:"-" xml:"-"` +} + +type metadataNotificationConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +type NotificationConfigurationDeprecated struct { + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` + + metadataNotificationConfigurationDeprecated `json:"-" xml:"-"` +} + +type metadataNotificationConfigurationDeprecated struct { + SDKShapeTraits bool `type:"structure"` +} + +type Object struct { + ETag *string `type:"string"` + + Key *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string"` + + metadataObject `json:"-" xml:"-"` +} + +type metadataObject struct { + SDKShapeTraits bool `type:"structure"` +} + +type ObjectIdentifier struct { + // Key name of the object to delete. + Key *string `type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionID *string `locationName:"VersionId" type:"string"` + + metadataObjectIdentifier `json:"-" xml:"-"` +} + +type metadataObjectIdentifier struct { + SDKShapeTraits bool `type:"structure"` +} + +type ObjectVersion struct { + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string"` + + // Version ID of an object. + VersionID *string `locationName:"VersionId" type:"string"` + + metadataObjectVersion `json:"-" xml:"-"` +} + +type metadataObjectVersion struct { + SDKShapeTraits bool `type:"structure"` +} + +type Owner struct { + DisplayName *string `type:"string"` + + ID *string `type:"string"` + + metadataOwner `json:"-" xml:"-"` +} + +type metadataOwner struct { + SDKShapeTraits bool `type:"structure"` +} + +type Part struct { + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` + + metadataPart `json:"-" xml:"-"` +} + +type metadataPart struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketACLInput struct { + // The canned ACL to apply to the bucket. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketACLInput `json:"-" xml:"-"` +} + +type metadataPutBucketACLInput struct { + SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"` +} + +type PutBucketACLOutput struct { + metadataPutBucketACLOutput `json:"-" xml:"-"` +} + +type metadataPutBucketACLOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketCORSInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketCORSInput `json:"-" xml:"-"` +} + +type metadataPutBucketCORSInput struct { + SDKShapeTraits bool `type:"structure" payload:"CORSConfiguration"` +} + +type PutBucketCORSOutput struct { + metadataPutBucketCORSOutput `json:"-" xml:"-"` +} + +type metadataPutBucketCORSOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketLifecycleInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketLifecycleInput `json:"-" xml:"-"` +} + +type metadataPutBucketLifecycleInput struct { + SDKShapeTraits bool `type:"structure" payload:"LifecycleConfiguration"` +} + +type PutBucketLifecycleOutput struct { + metadataPutBucketLifecycleOutput `json:"-" xml:"-"` +} + +type metadataPutBucketLifecycleOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketLoggingInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketLoggingInput `json:"-" xml:"-"` +} + +type metadataPutBucketLoggingInput struct { + SDKShapeTraits bool `type:"structure" payload:"BucketLoggingStatus"` +} + +type PutBucketLoggingOutput struct { + metadataPutBucketLoggingOutput `json:"-" xml:"-"` +} + +type 
metadataPutBucketLoggingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketNotificationConfigurationInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketNotificationConfigurationInput `json:"-" xml:"-"` +} + +type metadataPutBucketNotificationConfigurationInput struct { + SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"` +} + +type PutBucketNotificationConfigurationOutput struct { + metadataPutBucketNotificationConfigurationOutput `json:"-" xml:"-"` +} + +type metadataPutBucketNotificationConfigurationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketNotificationInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketNotificationInput `json:"-" xml:"-"` +} + +type metadataPutBucketNotificationInput struct { + SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"` +} + +type PutBucketNotificationOutput struct { + metadataPutBucketNotificationOutput `json:"-" xml:"-"` +} + +type metadataPutBucketNotificationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketPolicyInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + Policy *string `type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketPolicyInput `json:"-" xml:"-"` +} + +type metadataPutBucketPolicyInput struct { + SDKShapeTraits bool `type:"structure" payload:"Policy"` +} + +type PutBucketPolicyOutput struct { + metadataPutBucketPolicyOutput `json:"-" xml:"-"` +} + +type metadataPutBucketPolicyOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketReplicationInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. 
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketReplicationInput `json:"-" xml:"-"` +} + +type metadataPutBucketReplicationInput struct { + SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"` +} + +type PutBucketReplicationOutput struct { + metadataPutBucketReplicationOutput `json:"-" xml:"-"` +} + +type metadataPutBucketReplicationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketRequestPaymentInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketRequestPaymentInput `json:"-" xml:"-"` +} + +type metadataPutBucketRequestPaymentInput struct { + SDKShapeTraits bool `type:"structure" payload:"RequestPaymentConfiguration"` +} + +type PutBucketRequestPaymentOutput struct { + metadataPutBucketRequestPaymentOutput `json:"-" xml:"-"` +} + +type metadataPutBucketRequestPaymentOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketTaggingInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketTaggingInput `json:"-" xml:"-"` +} + +type metadataPutBucketTaggingInput struct { + SDKShapeTraits bool `type:"structure" payload:"Tagging"` +} + +type PutBucketTaggingOutput struct { + metadataPutBucketTaggingOutput `json:"-" xml:"-"` +} + +type metadataPutBucketTaggingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketVersioningInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketVersioningInput `json:"-" xml:"-"` +} + +type metadataPutBucketVersioningInput struct { + SDKShapeTraits bool `type:"structure" payload:"VersioningConfiguration"` +} + +type PutBucketVersioningOutput struct { + metadataPutBucketVersioningOutput `json:"-" xml:"-"` +} + +type metadataPutBucketVersioningOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutBucketWebsiteInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutBucketWebsiteInput `json:"-" xml:"-"` +} + +type metadataPutBucketWebsiteInput struct { + SDKShapeTraits bool `type:"structure" payload:"WebsiteConfiguration"` +} + +type PutBucketWebsiteOutput struct { + metadataPutBucketWebsiteOutput `json:"-" xml:"-"` +} + +type metadataPutBucketWebsiteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +type PutObjectACLInput struct { + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataPutObjectACLInput `json:"-" xml:"-"` +} + +type metadataPutObjectACLInput struct { + SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"` +} + +type PutObjectACLOutput struct { + // If present, indicates that the requester was successfully charged for the + // request. 
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	metadataPutObjectACLOutput `json:"-" xml:"-"`
+}
+
+type metadataPutObjectACLOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type PutObjectInput struct {
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+	// Object data.
+	Body io.ReadSeeker `type:"blob"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	XAmzTagging *string `location:"header" locationName:"X-Amz-Tagging" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+	ContentMaxLength *int64 `location:"header" locationName:"x-amz-content-maxlength" type:"integer"`
+
+	CallbackUrl  *string `location:"header" locationName:"x-kss-callbackurl" type:"string"`
+	CallbackBody *string `location:"header" locationName:"x-kss-callbackbody" type:"string"`
+
+	TrafficLimit *int64 `location:"header" locationName:"x-kss-traffic-limit" type:"integer"`
+
+	metadataPutObjectInput `json:"-" xml:"-"`
+}
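+
+// The following is an illustrative sketch (not part of the generated API) of
+// assembling a PutObjectInput. It assumes the client's PutObject method,
+// which is defined elsewhere in this SDK; the bucket, key, and content are
+// placeholders.
+func examplePutObject(c *S3) error {
+	bucket, key, contentType := "my-bucket", "my-key", "text/plain"
+	body := strings.NewReader("hello world") // *strings.Reader satisfies io.ReadSeeker
+	_, err := c.PutObject(&PutObjectInput{
+		Bucket:      &bucket,
+		Key:         &key,
+		Body:        body,
+		ContentType: &contentType,
+	})
+	return err
+}
+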
+type PutReaderRequest struct {
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+	// Object data.
+	Body io.Reader `type:"blob"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+	ContentMaxLength *int64 `location:"header" locationName:"x-amz-content-maxlength" type:"integer"`
+
+	CallbackUrl  *string `location:"header" locationName:"x-kss-callbackurl" type:"string"`
+	CallbackBody *string `location:"header" locationName:"x-kss-callbackbody" type:"string"`
+
+	metadataPutObjectInput `json:"-" xml:"-"`
+}
+
+type metadataPutObjectInput struct {
+	SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+type PutObjectOutput struct {
+	// Entity tag for the uploaded object.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured, this will contain the expiration
+	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// Version of the object.
+	VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+	NewFileName *string `location:"header" locationName:"newfilename" type:"string"`
+
+	metadataPutObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataPutObjectOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	ID *string `locationName:"Id" type:"string"`
+
+	// Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+	// events of specified type.
+	QueueARN *string `locationName:"Queue" type:"string" required:"true"`
+
+	metadataQueueConfiguration `json:"-" xml:"-"`
+}
+
+type metadataQueueConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type QueueConfigurationDeprecated struct {
+	// Bucket event for which to send notifications.
+	Event *string `type:"string"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	ID *string `locationName:"Id" type:"string"`
+
+	Queue *string `type:"string"`
+
+	metadataQueueConfigurationDeprecated `json:"-" xml:"-"`
+}
+
+type metadataQueueConfigurationDeprecated struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type Redirect struct {
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HTTPRedirectCode *string `locationName:"HttpRedirectCode" type:"string"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
+
+	metadataRedirect `json:"-" xml:"-"`
+}
+
+type metadataRedirect struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type RedirectAllRequestsTo struct {
+	// Name of the host where requests will be redirected.
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string"`
+
+	metadataRedirectAllRequestsTo `json:"-" xml:"-"`
+}
+
+type metadataRedirectAllRequestsTo struct {
+	SDKShapeTraits bool `type:"structure"`
+}
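+
+// The following is an illustrative sketch (not part of the generated API) of
+// the docs/ -> documents/ redirect described on Redirect above. It assumes
+// the Condition type, with its KeyPrefixEquals field, defined earlier in this
+// file; RoutingRule is defined further below. All names are placeholders.
+func exampleDocsRedirectRule() *RoutingRule {
+	prefix := "docs/"
+	replaceWith := "documents/"
+	return &RoutingRule{
+		Condition: &Condition{KeyPrefixEquals: &prefix},
+		Redirect:  &Redirect{ReplaceKeyPrefixWith: &replaceWith},
+	}
+}
+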
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+type ReplicationConfiguration struct {
+	// Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+	// the objects.
+	Role *string `type:"string" required:"true"`
+
+	// Container for information about a particular replication rule. Replication
+	// configuration must have at least one rule and can contain up to 1,000 rules.
+	Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+
+	metadataReplicationConfiguration `json:"-" xml:"-"`
+}
+
+type metadataReplicationConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type ReplicationRule struct {
+	Destination *Destination `type:"structure" required:"true"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Object keyname prefix identifying one or more objects to which the rule applies.
+	// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
+	// are not supported.
+	Prefix *string `type:"string" required:"true"`
+
+	// The rule is ignored if status is not Enabled.
+	Status *string `type:"string" required:"true"`
+
+	metadataReplicationRule `json:"-" xml:"-"`
+}
+
+type metadataReplicationRule struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type RequestPaymentConfiguration struct {
+	// Specifies who pays for the download and request fees.
+	Payer *string `type:"string" required:"true"`
+
+	metadataRequestPaymentConfiguration `json:"-" xml:"-"`
+}
+
+type metadataRequestPaymentConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type RestoreObjectInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+	VersionID *string `location:"querystring" locationName:"versionId" type:"string"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataRestoreObjectInput `json:"-" xml:"-"`
+}
+
+type metadataRestoreObjectInput struct {
+	SDKShapeTraits bool `type:"structure" payload:"RestoreRequest"`
+}
+
+type RestoreObjectOutput struct {
+	Ks3WebServiceResponse
+}
+
+type metadataRestoreObjectOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type RestoreRequest struct {
+	// Lifetime of the active copy in days.
+	Days *int64 `type:"integer" required:"true"`
+
+	metadataRestoreRequest `json:"-" xml:"-"`
+}
+
+type metadataRestoreRequest struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type RoutingRule struct {
+	// A container for describing a condition that must be met for the specified
+	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+	// redirect request to another host where you might process the error.
+	Condition *Condition `type:"structure"`
+
+	// Container for redirect information. You can redirect requests to another
+	// host, to another page, or with another protocol. In the event of an error,
+	// you can specify a different error code to return.
+	Redirect *Redirect `type:"structure" required:"true"`
+
+	metadataRoutingRule `json:"-" xml:"-"`
+}
+
+type metadataRoutingRule struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type Tag struct {
+	// Name of the tag.
+	Key *string `type:"string" required:"true"`
+
+	// Value of the tag.
+ Value *string `type:"string" required:"true"` + + metadataTag `json:"-" xml:"-"` +} + +type metadataTag struct { + SDKShapeTraits bool `type:"structure"` +} + +type Tagging struct { + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + metadataTagging `json:"-" xml:"-"` +} + +type metadataTagging struct { + SDKShapeTraits bool `type:"structure"` +} + +type TargetGrant struct { + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string"` + + metadataTargetGrant `json:"-" xml:"-"` +} + +type metadataTargetGrant struct { + SDKShapeTraits bool `type:"structure"` +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + ID *string `locationName:"Id" type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + TopicARN *string `locationName:"Topic" type:"string" required:"true"` + + metadataTopicConfiguration `json:"-" xml:"-"` +} + +type metadataTopicConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +type TopicConfigurationDeprecated struct { + // Bucket event for which to send notifications. + Event *string `type:"string"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + ID *string `locationName:"Id" type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` + + metadataTopicConfigurationDeprecated `json:"-" xml:"-"` +} + +type metadataTopicConfigurationDeprecated struct { + SDKShapeTraits bool `type:"structure"` +} + +type Transition struct { + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string"` + + metadataTransition `json:"-" xml:"-"` +} + +type metadataTransition struct { + SDKShapeTraits bool `type:"structure"` +} + +type UploadPartCopyInput struct { + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. 
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Copies the object if its entity tag (ETag) is different than the specified
+	// ETag.
+	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+	// Copies the object if it hasn't been modified since the specified time.
+	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// The range of bytes to copy from the source object. The range value must use
+	// the form bytes=first-last, where the first and last are the zero-based byte
+	// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+	// first ten bytes of the source. You can copy a range only if the source object
+	// is greater than 5 GB.
+	CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+	// the source object. The encryption key provided in this header must be one
+	// that was used when the source object was created.
+	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// Part number of part being copied.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + metadataUploadPartCopyInput `json:"-" xml:"-"` +} + +type metadataUploadPartCopyInput struct { + SDKShapeTraits bool `type:"structure"` +} + +type UploadPartCopyOutput struct { + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + metadataUploadPartCopyOutput `json:"-" xml:"-"` +} + +type metadataUploadPartCopyOutput struct { + SDKShapeTraits bool `type:"structure" payload:"CopyPartResult"` +} + +type UploadPartInput struct { + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // Part number of part being uploaded. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being uploaded.
+	UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataUploadPartInput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartInput struct {
+	SDKShapeTraits bool `type:"structure" payload:"Body"`
+}
+
+type UploadPartOutput struct {
+	// Entity tag for the uploaded object.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	metadataUploadPartOutput `json:"-" xml:"-"`
+}
+
+type metadataUploadPartOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
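+
+// The following is an illustrative sketch (not part of the generated API) of
+// uploading a single part of a multipart upload. It assumes the client's
+// UploadPart method, which is defined elsewhere in this SDK; the bucket and
+// key are placeholders, and uploadID would come from initiating the upload.
+func exampleUploadPart(c *S3, uploadID string, partNumber int64, data []byte) (*UploadPartOutput, error) {
+	bucket, key := "my-bucket", "my-key"
+	length := int64(len(data))
+	return c.UploadPart(&UploadPartInput{
+		Bucket:        &bucket,
+		Key:           &key,
+		UploadID:      &uploadID,
+		PartNumber:    &partNumber,
+		Body:          bytes.NewReader(data), // *bytes.Reader satisfies io.ReadSeeker
+		ContentLength: &length,
+	})
+}
+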
+type VersioningConfiguration struct {
+	// Specifies whether MFA delete is enabled in the bucket versioning configuration.
+	// This element is only returned if the bucket has been configured with MFA
+	// delete. If the bucket has never been so configured, this element is not returned.
+	MFADelete *string `locationName:"MfaDelete" type:"string"`
+
+	// The versioning state of the bucket.
+	Status *string `type:"string"`
+
+	metadataVersioningConfiguration `json:"-" xml:"-"`
+}
+
+type metadataVersioningConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type WebsiteConfiguration struct {
+	ErrorDocument *ErrorDocument `type:"structure"`
+
+	IndexDocument *IndexDocument `type:"structure"`
+
+	RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+	RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+
+	metadataWebsiteConfiguration `json:"-" xml:"-"`
+}
+
+type metadataWebsiteConfiguration struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type Header map[string][]string
+
+type Ks3WebServiceResponse struct {
+	HttpCode  int    `xml:"HttpCode"`
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	Resource  string `xml:"Resource"`
+	RequestId string `xml:"RequestId"`
+	Header    Header
+	Body      []byte
+}
+
+// SignedReq signs req in place using the KSS v1 scheme: it canonicalizes the
+// x-kss-* headers, builds the string to sign from the method, Content-MD5,
+// Content-Type, Date, the canonicalized headers, and the canonicalized
+// resource, then sets the resulting "KSS AccessKeyID:Signature" value on the
+// Authorization header. The Date header must be set before calling it.
+func (c *S3) SignedReq(req *http.Request, canonicalizedResource string) {
+	kssHeadersMap := make(map[string]string)
+	for k, v := range req.Header {
+		if strings.HasPrefix(strings.ToLower(k), "x-kss-") {
+			kssHeadersMap[strings.ToLower(k)] = v[0]
+		}
+	}
+	hs := newHeaderSorter(kssHeadersMap)
+	hs.Sort()
+	canonicalizedKS3Headers := ""
+	for i := range hs.Keys {
+		canonicalizedKS3Headers += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
+	}
+	date := req.Header.Get(HTTPHeaderDate)
+	contentType := req.Header.Get(HTTPHeaderContentType)
+	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedKS3Headers + canonicalizedResource
+	if c.Config.LogHTTPBody {
+		fmt.Print("signStr : ", signStr+"\n")
+	}
+	config, _ := c.Config.Credentials.Get()
+	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(config.SecretAccessKey))
+	io.WriteString(h, signStr)
+	signedStr := "KSS " + config.AccessKeyID + ":" + base64.StdEncoding.EncodeToString(h.Sum(nil))
+	if c.Config.LogHTTPBody {
+		fmt.Print("signedStr : ", signedStr+"\n")
+	}
+	req.Header.Set(HTTPHeaderAuthorization, signedStr)
+}
+
+// headerSorter holds the key-value pairs that SignedReq sorts when building
+// the canonicalized x-kss-* header block.
+type headerSorter struct {
+	Keys []string
+	Vals []string
+}
+
+// Sort sorts the headers by key for SignedReq.
+func (hs *headerSorter) Sort() {
+	sort.Sort(hs)
+}
+
+// Len implements sort.Interface.
+func (hs *headerSorter) Len() int {
+	return len(hs.Vals)
+}
+
+// Less implements sort.Interface, ordering headers by key.
+func (hs *headerSorter) Less(i, j int) bool {
+	return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
+}
+
+// Swap implements sort.Interface.
+func (hs *headerSorter) Swap(i, j int) {
+	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
+	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
+}
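+
+// The following is an illustrative sketch (not part of the SDK surface) of
+// signing a hand-built request with SignedReq, in the same style RestoreObject
+// uses below. The endpoint, bucket, and key are placeholders.
+func exampleSignedReq(c *S3) (*http.Request, error) {
+	resource := "/my-bucket/my-key"
+	req, err := http.NewRequest("GET", c.Endpoint+resource, nil)
+	if err != nil {
+		return nil, err
+	}
+	// The Date header is part of the string to sign, so set it first.
+	req.Header.Set(HTTPHeaderDate, time.Now().UTC().Format(http.TimeFormat))
+	c.SignedReq(req, resource)
+	return req, nil
+}
+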
+func newHeaderSorter(m map[string]string) *headerSorter {
+	hs := &headerSorter{
+		Keys: make([]string, 0, len(m)),
+		Vals: make([]string, 0, len(m)),
+	}
+
+	for k, v := range m {
+		hs.Keys = append(hs.Keys, k)
+		hs.Vals = append(hs.Vals, v)
+	}
+	return hs
+}
+
+// ACL types
+const AllUsersUri = "http://acs.amazonaws.com/groups/global/AllUsers"
+
+type CannedAccessControlType int32
+
+const (
+	PublicReadWrite CannedAccessControlType = 0
+	PublicRead      CannedAccessControlType = 1
+	Private         CannedAccessControlType = 2
+)
+
+// GetAcl derives the canned ACL type of an object from the AllUsers grants in
+// a GetObjectACL response.
+func GetAcl(resp GetObjectACLOutput) CannedAccessControlType {
+	allUsersPermissions := mapset.NewSet()
+	for _, value := range resp.Grants {
+		if value.Grantee.URI != nil && *value.Grantee.URI == AllUsersUri {
+			allUsersPermissions.Add(value.Permission)
+		}
+	}
+	read := allUsersPermissions.Contains("READ")
+	write := allUsersPermissions.Contains("WRITE")
+	if read && write {
+		return PublicReadWrite
+	} else if read {
+		return PublicRead
+	} else {
+		return Private
+	}
+}
+
+// RestoreObject requests restoration of an archived object via POST ?restore.
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if input == nil {
+		input = &RestoreObjectInput{}
+	}
+	out := &RestoreObjectOutput{}
+
+	date := time.Now().UTC().Format(http.TimeFormat)
+	client := &http.Client{}
+	objectKey := *input.Key + "?restore"
+	resource := "/" + *input.Bucket + "/" + objectKey
+	url := c.Endpoint + resource
+	req, err := http.NewRequest("POST", url, nil)
+	if err != nil {
+		// Previously this error was ignored, which could leave req nil below.
+		return out, err
+	}
+	req.Header.Set(HTTPHeaderDate, date)
+	req.Header.Set(HTTPHeaderHost, c.Endpoint)
+	c.SignedReq(req, resource)
+	res, err := client.Do(req)
+	if res != nil {
+		defer res.Body.Close()
+		body, _ := ioutil.ReadAll(res.Body)
+		xml.Unmarshal((body), &out)
+		if err != nil {
+			fmt.Println(err)
+		}
+		if res.Header != nil {
+			out.Header = Header(res.Header)
+			out.HttpCode = res.StatusCode
+		}
+		if body != nil {
+			out.Body = body
+		}
+	}
+	return out, err
+}
+
+//----obj tag start--
+
+// DeleteObjectTaggingRequest generates a request for the DeleteObjectTagging operation.
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *aws.Request, output *DeleteObjectTaggingOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opDeleteObjectTagging == nil {
+		opDeleteObjectTagging = &aws.Operation{
+			Name:       "DeleteObjectTagging",
+			HTTPMethod: "DELETE",
+			HTTPPath:   "/{Bucket}/{Key+}?tagging",
+		}
+	}
+
+	if input == nil {
+		input = &DeleteObjectTaggingInput{}
+	}
+
+	req = c.newRequest(opDeleteObjectTagging, input, output)
+	output = &DeleteObjectTaggingOutput{}
+	req.Data = output
+	return
+}
+
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+	req, out := c.DeleteObjectTaggingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *S3) DeleteObjectTaggingPresignedUrl(input *DeleteObjectTaggingInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.DeleteObjectTaggingRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opDeleteObjectTagging *aws.Operation
+
+type DeleteObjectTaggingInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataDeleteObjectTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteObjectTaggingInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type DeleteObjectTaggingOutput struct {
+	metadataDeleteObjectTaggingOutput `json:"-" xml:"-"`
+}
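+
+// describeCannedACL is an illustrative sketch (an addition for clarity, not
+// part of the upstream SDK) showing how the CannedAccessControlType values
+// returned by GetAcl map onto the usual canned ACL names.
+func describeCannedACL(t CannedAccessControlType) string {
+	switch t {
+	case PublicReadWrite:
+		return "public-read-write" // AllUsers group holds READ and WRITE
+	case PublicRead:
+		return "public-read" // AllUsers group holds READ only
+	default:
+		return "private" // no AllUsers grants
+	}
+}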
+
+type metadataDeleteObjectTaggingOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// GetObjectTaggingRequest generates a request for the GetObjectTagging operation.
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *aws.Request, output *GetObjectTaggingOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opGetObjectTagging == nil {
+		opGetObjectTagging = &aws.Operation{
+			Name:       "GetObjectTagging",
+			HTTPMethod: "GET",
+			HTTPPath:   "/{Bucket}/{Key+}?tagging",
+		}
+	}
+
+	if input == nil {
+		input = &GetObjectTaggingInput{}
+	}
+
+	req = c.newRequest(opGetObjectTagging, input, output)
+	output = &GetObjectTaggingOutput{}
+	req.Data = output
+	return
+}
+
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+	req, out := c.GetObjectTaggingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *S3) GetObjectTaggingPresignedUrl(input *GetObjectTaggingInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.GetObjectTaggingRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opGetObjectTagging *aws.Operation
+
+type GetObjectTaggingInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataGetObjectTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectTaggingInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type GetObjectTaggingOutput struct {
+	Tagging *Tagging `locationName:"Tagging" type:"structure"`
+
+	metadataGetObjectTaggingOutput `json:"-" xml:"-"`
+}
+
+type metadataGetObjectTaggingOutput struct {
+	SDKShapeTraits bool `type:"structure" payload:"Tagging"`
+}
+
+// PutObjectTaggingRequest generates a request for the PutObjectTagging operation.
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *aws.Request, output *PutObjectTaggingOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opPutObjectTagging == nil {
+		opPutObjectTagging = &aws.Operation{
+			Name:       "PutObjectTagging",
+			HTTPMethod: "PUT",
+			HTTPPath:   "/{Bucket}/{Key+}?tagging",
+		}
+	}
+
+	if input == nil {
+		input = &PutObjectTaggingInput{}
+	}
+	req = c.newRequest(opPutObjectTagging, input, output)
+	output = &PutObjectTaggingOutput{}
+	req.Data = output
+	return
+}
+
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+	input.ContentType = aws.String("application/xml")
+	req, out := c.PutObjectTaggingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *S3) PutObjectTaggingPresignedUrl(input *PutObjectTaggingInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.PutObjectTaggingRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opPutObjectTagging *aws.Operation
+
+type PutObjectTaggingInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	Tagging *Tagging `locationName:"Tagging" type:"structure"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	metadataPutObjectTaggingInput `json:"-" xml:"-"`
+}
+
+type metadataPutObjectTaggingInput struct {
+	SDKShapeTraits bool `type:"structure" payload:"Tagging"`
+}
+
+type PutObjectTaggingOutput struct {
+	metadataPutObjectTaggingOutput `json:"-" xml:"-"`
+}
+
+type metadataPutObjectTaggingOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+//----obj tag end--
+
+// FetchObjectRequest generates a request for the FetchObject operation.
+func (c *S3) FetchObjectRequest(input *FetchObjectInput) (req *aws.Request, output *FetchObjectOutput) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if opFetchObject == nil {
+		opFetchObject = &aws.Operation{
+			Name:       "FetchObject",
+			HTTPMethod: "PUT",
+			HTTPPath:   "/{Bucket}/{Key+}?fetch",
+		}
+	}
+
+	if input == nil {
+		input = &FetchObjectInput{}
+	}
+
+	req = c.newRequest(opFetchObject, input, output)
+	output = &FetchObjectOutput{}
+	req.Data = output
+	return
+}
+
+func (c *S3) FetchObject(input *FetchObjectInput) (*FetchObjectOutput, error) {
+	req, out := c.FetchObjectRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *S3) FetchObjectPresignedUrl(input *FetchObjectInput, expires time.Duration) (*url.URL, error) {
+	req, _ := c.FetchObjectRequest(input)
+	req.ExpireTime = expires
+	err := req.Sign()
+	return req.HTTPRequest.URL, err
+}
+
+var opFetchObject *aws.Operation
+
+type FetchObjectInput struct {
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-kss-acl" type:"string"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The URL of the source object to fetch. Must be URL-encoded.
+	SourceUrl *string `location:"header" locationName:"x-kss-sourceurl" type:"string" required:"true"`
+
+	CallbackUrl *string `location:"header" locationName:"x-kss-callbackurl" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether the metadata is copied from the source object or replaced
+	// with metadata provided in the request.
+	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+	XAmzTagging *string `location:"header" locationName:"X-Amz-Tagging" type:"string"`
+
+	metadataFetchObjectInput `json:"-" xml:"-"`
+}
+
+type metadataFetchObjectInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type FetchObjectOutput struct {
+	// If the object expiration is configured, the response includes this header.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	metadataFetchObjectOutput `json:"-" xml:"-"`
+}
+
+type metadataFetchObjectOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+//--------fetch object end-------
+
+//-------- bucket mirror (mirror back-to-origin) -------
+type BucketMirror struct {
+	Version          *string            `json:"version"`
+	UseDefaultRobots *bool              `json:"use_default_robots"`
+	AsyncMirrorRule  *AsyncMirrorRule   `json:"async_mirror_rule,omitempty"`
+	SyncMirrorRules  []*SyncMirrorRules `json:"sync_mirror_rules,omitempty"`
+}
+
+type SavingSetting struct {
+	ACL *string `json:"acl,omitempty" required:"true"`
+}
+
+type AsyncMirrorRule struct {
+	MirrorUrls    []*string      `json:"mirror_urls,omitempty" required:"true"`
+	SavingSetting *SavingSetting `json:"saving_setting,omitempty" required:"true"`
+}
+
+type MatchCondition struct {
+	HTTPCodes   []*string `json:"http_codes,omitempty"`
+	KeyPrefixes []*string `json:"key_prefixes,omitempty"`
+}
+
+type SetHeaders struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+type RemoveHeaders struct {
+	Key *string `json:"key,omitempty"`
+}
+
+type PassHeaders struct {
+	Key *string `json:"key,omitempty"`
+}
+
+type HeaderSetting struct {
+	SetHeaders    []*SetHeaders    `json:"set_headers,omitempty"`
+	RemoveHeaders []*RemoveHeaders `json:"remove_headers,omitempty"`
+	PassAll       *bool            `json:"pass_all,omitempty"`
+	PassHeaders   []*PassHeaders   `json:"pass_headers,omitempty"`
+}
+
+type MirrorRequestSetting struct {
+	PassQueryString *bool          `json:"pass_query_string,omitempty"`
+	Follow3Xx       *bool          `json:"follow3xx,omitempty"`
+	HeaderSetting   *HeaderSetting `json:"header_setting,omitempty"`
+}
+
+type SyncMirrorRules struct {
+	MatchCondition       MatchCondition        `json:"match_condition"`
+	MirrorURL            *string               `json:"mirror_url,omitempty"`
+	MirrorRequestSetting *MirrorRequestSetting `json:"mirror_request_setting,omitempty"`
+	SavingSetting        *SavingSetting        `json:"saving_setting,omitempty"`
+}
+
+func (c *S3) PutBucketMirror(input *PutBucketMirrorInput) (*PutBucketMirrorOutput, error) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if input == nil {
+		input = &PutBucketMirrorInput{}
+	}
+	out := &PutBucketMirrorOutput{}
+	date := time.Now().UTC().Format(http.TimeFormat)
+	resource := "/" + *input.Bucket + "?mirror"
+	signResource := "/" + *input.Bucket + "/?mirror"
+	url := c.Endpoint + resource
+	data, err := json.Marshal(input.BucketMirror)
+	if err != nil {
+		// Return the marshalling error instead of panicking.
+		return out, err
+	}
+	request, err := http.NewRequest("PUT", url, bytes.NewReader(data))
+	if err != nil {
+		return out, err
+	}
+	request.Header.Set(HTTPHeaderContentType, "application/json")
+	request.Header.Set(HTTPHeaderDate, date)
+	request.Header.Set(HTTPHeaderHost, c.Endpoint)
+	c.SignedReq(request, signResource)
+	client := &http.Client{}
+	res, err := client.Do(request)
+	if res != nil {
+		defer res.Body.Close()
+		body, _ := ioutil.ReadAll(res.Body)
+		xml.Unmarshal((body), &out)
+		if err != nil {
+			fmt.Println(err)
+		}
+		if res.Header != nil {
+			out.Header = Header(res.Header)
+			out.HttpCode = res.StatusCode
+		}
+		if body != nil {
+			out.Body = body
+		}
+	}
+	return out, err
+}
+
+var opPutBucketMirror *aws.Operation
+
+type PutBucketMirrorInput struct {
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	BucketMirror *BucketMirror `locationName:"Mirror" type:"structure"`
+
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+}
+
+type PutBucketMirrorOutput struct {
+	Ks3WebServiceResponse `json:"-" xml:"-"`
+}
+
+type GetBucketMirrorInput struct {
+	Bucket      *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+}
+
+type GetBucketMirrorOutput struct {
+	Ks3WebServiceResponse `json:"-" xml:"-"`
+	BucketMirror          *BucketMirror `locationName:"Mirror" type:"structure"`
+}
+
+func (c *S3) GetBucketMirror(input *GetBucketMirrorInput) (*GetBucketMirrorOutput, error) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if input == nil {
+		input = &GetBucketMirrorInput{}
+	}
+	out := &GetBucketMirrorOutput{}
+	date := time.Now().UTC().Format(http.TimeFormat)
+	resource := "/" + *input.Bucket + "?mirror"
+	signResource := "/" + *input.Bucket + "/?mirror"
+	url := c.Endpoint + resource
+	request, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return out, err
+	}
+	request.Header.Set(HTTPHeaderContentType, "application/json")
+	request.Header.Set(HTTPHeaderDate, date)
+	request.Header.Set(HTTPHeaderHost, c.Endpoint)
+	c.SignedReq(request, signResource)
+	client := &http.Client{}
+	res, err := client.Do(request)
+	if res != nil {
+		defer res.Body.Close()
+		body, _ := ioutil.ReadAll(res.Body)
+		xml.Unmarshal((body), &out)
+		if err != nil {
+			fmt.Println(err)
+		}
+		if res.Header != nil {
+			out.Header = Header(res.Header)
+			out.HttpCode = res.StatusCode
+			if res.StatusCode == 200 {
+				// Decode the freshly read body; out.Body is not assigned until
+				// below, so unmarshalling out.Body here would always see nil.
+				json.Unmarshal(body, &out.BucketMirror)
+			}
+		}
+		if body != nil {
+			out.Body = body
+		}
+	}
+	return out, err
+}
+
+type DeleteBucketMirrorInput struct {
+	Bucket      *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+}
+
+type DeleteBucketMirrorOutput struct {
+	Ks3WebServiceResponse `json:"-" xml:"-"`
+}
+
+func (c *S3) DeleteBucketMirror(input *DeleteBucketMirrorInput) (*DeleteBucketMirrorOutput, error) {
+	oprw.Lock()
+	defer oprw.Unlock()
+
+	if input == nil {
+		input = &DeleteBucketMirrorInput{}
+	}
+	out := &DeleteBucketMirrorOutput{}
+	date := time.Now().UTC().Format(http.TimeFormat)
+	resource := "/" + *input.Bucket + "?mirror"
+	signResource := "/" + *input.Bucket + "/?mirror"
+	url := c.Endpoint + resource
+	request, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return out, err
+	}
+	request.Header.Set(HTTPHeaderContentType, "application/json")
+	request.Header.Set(HTTPHeaderDate, date)
+	request.Header.Set(HTTPHeaderHost, c.Endpoint)
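+	// Sign against signResource ("/<bucket>/?mirror"); note the trailing slash
+	// after the bucket name, unlike the request path ("/<bucket>?mirror") used
+	// to build the URL above.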
c.SignedReq(request, signResource) + client := &http.Client{} + res, err := client.Do(request) + if res != nil { + defer res.Body.Close() + body, _ := ioutil.ReadAll(res.Body) + xml.Unmarshal((body), &out) + if err != nil { + fmt.Println(err) + } + if res.Header != nil { + out.Header = Header(res.Header) + out.HttpCode = res.StatusCode + } + if body != nil { + out.Body = body + } + } + return out, err + +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/const.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/const.go new file mode 100644 index 0000000..af18957 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/const.go @@ -0,0 +1,19 @@ +package s3 + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderDate = "Date" + HTTPHeaderEtag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderkssACL = "X-kss-Acl" +) diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/errors.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/errors.go new file mode 100644 index 0000000..6d3e726 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This action is not allowed against this storage tier. + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". 
+ // + // The source object of the COPY action is not in the active tier and is only + // stored in Amazon S3 Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 0000000..c1b5394 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,53 @@ +package s3 + +import ( + "regexp" + "strings" + + "github.com/ks3sdklib/aws-sdk-go/aws" + "github.com/ks3sdklib/aws-sdk-go/aws/awsutil" +) + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// hostStyleBucketName returns true if the request should put the bucket in +// the host. This is false if S3ForcePathStyle is explicitly set or if the +// bucket is not DNS compatible. +func hostStyleBucketName(r *aws.Request, bucket string) bool { + if r.Config.S3ForcePathStyle { + return false + } + + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // Use host-style if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +func updateHostWithBucket(r *aws.Request) { + b := awsutil.ValuesAtPath(r.Params, "Bucket") + if len(b) == 0 { + return + } + + if bucket := b[0].(string); bucket != "" && hostStyleBucketName(r, bucket) { + r.HTTPRequest.URL.Host = bucket + "." + r.HTTPRequest.URL.Host + r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) + if r.HTTPRequest.URL.Path == "" { + r.HTTPRequest.URL.Path = "/" + } + } +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/service.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/service.go new file mode 100644 index 0000000..1476944 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/service.go @@ -0,0 +1,57 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/ks3sdklib/aws-sdk-go/aws" + "github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml" + "github.com/ks3sdklib/aws-sdk-go/internal/signer/v2" +) + +// S3 is a client for Amazon S3. +type S3 struct { + *aws.Service +} + +// Used for custom service initialization logic +var initService func(*aws.Service) + +// Used for custom request initialization logic +var initRequest func(*aws.Request) + +// New returns a new S3 client. 
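+//
+// Illustrative usage (a sketch; the endpoint value is hypothetical and the
+// exact aws.Config fields depend on this fork of the SDK):
+//
+//	svc := New(&aws.Config{Endpoint: "ks3-cn-beijing.ksyuncs.com"})
+//	out, err := svc.GetObjectTagging(&GetObjectTaggingInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("my-key"),
+//	})
+//	// on success, out.Tagging holds the object's tags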
+func New(config *aws.Config) *S3 { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "s3", + APIVersion: "2006-03-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v2.Sign) + service.Handlers.Build.PushBack(restxml.Build) + //service.Handlers.Build.PushBack(aws.ContentTypeHandler) + service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + + // Run custom service initialization if present + if initService != nil { + initService(service) + } + + return &S3{service} +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + updateHostWithBucket(req) + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + return req +} diff --git a/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/try.go b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/try.go new file mode 100644 index 0000000..d2ba553 --- /dev/null +++ b/vendor/github.com/ks3sdklib/aws-sdk-go/service/s3/try.go @@ -0,0 +1,35 @@ +package s3 +import "errors" + +// MaxRetries is the maximum number of retries before bailing. +var MaxRetries = 10 + +var errMaxRetriesReached = errors.New("exceeded retry limit") + +// Func represents functions that can be retried. +type Func func(attempt int) (retry bool, err error) + +// Do keeps trying the function until the second argument +// returns false, or no error is returned. +func Do(fn Func) error { + var err error + var cont bool + attempt := 1 + for { + cont, err = fn(attempt) + if !cont || err == nil { + break + } + attempt++ + if attempt > MaxRetries { + return errMaxRetriesReached + } + } + return err +} + +// IsMaxRetries checks whether the error is due to hitting the +// maximum number of retries or not. +func IsMaxRetries(err error) bool { + return err == errMaxRetriesReached +} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 0000000..c758234 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,96 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. 
[GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 0000000..0018dc7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 0000000..3a754ca --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
+func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
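+//
+// Illustrative use inside a DecoderConfig (a sketch; DecoderConfig is defined
+// in mapstructure.go below):
+//
+//	config := &DecoderConfig{
+//		DecodeHook: ComposeDecodeHookFunc(
+//			StringToTimeDurationHookFunc(),
+//			StringToSliceHookFunc(","),
+//		),
+//		Result: &target,
+//	}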
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
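+//
+// For example: with this hook installed, decoding the bool true into a string
+// field yields "1", and a []uint8 value is converted to the equivalent string.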
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
+
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+		if f.Kind() != reflect.Struct {
+			return f.Interface(), nil
+		}
+
+		var i interface{} = struct{}{}
+		if t.Type() != reflect.TypeOf(&i).Elem() {
+			return f.Interface(), nil
+		}
+
+		m := make(map[string]interface{})
+		t.Set(reflect.ValueOf(m))
+
+		return f.Interface(), nil
+	}
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+// strings to the UnmarshalText function, when the target type
+// implements the encoding.TextUnmarshaler interface.
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		result := reflect.New(t).Interface()
+		unmarshaller, ok := result.(encoding.TextUnmarshaler)
+		if !ok {
+			return data, nil
+		}
+		if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 0000000..47a99e5
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 0000000..1efb22a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1540 @@
+// Package mapstructure exposes functionality to convert one arbitrary
+// Go type into another, typically to convert a map[string]interface{}
+// into a native Go structure.
+// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. 
+// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). 
+ ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
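+//
+// Illustrative usage (a sketch):
+//
+//	type Person struct {
+//		Name string `mapstructure:"name"`
+//	}
+//
+//	var p Person
+//	err := Decode(map[string]interface{}{"name": "alice"}, &p)
+//	// on success, p.Name == "alice"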
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. 
+ if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err + } + + // This decodes a basic type (bool, int, string, etc.) and sets the + // value to "data" of that type. + func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then it's not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set the final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
+ v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // whereas reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil + } + + func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type.
+ valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil + } + + func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // The input must be a function whose type matches the target's + // exactly; no conversion is attempted. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil + } + + func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non-array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil + } + + func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less than or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil + } + + func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary.
+ + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // whereas reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + } + + func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down.
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags because we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing a case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is the empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Clear the unused-key set so that the next check (ErrorUnused) + // will not fire + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil + } + + func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false + } + + func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } + } + + func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // at least one exported field and no tag check requested + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // the field carries a mapstructure tag + return true + } + } + return false + } + + func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v + } diff --git a/vendor/github.com/mozillazg/go-httpheader/.bumpversion.cfg b/vendor/github.com/mozillazg/go-httpheader/.bumpversion.cfg new file mode 100644 index 0000000..62e4d5f --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/.bumpversion.cfg @@ -0,0 +1,7 @@ +[bumpversion] +commit = True +tag = True +current_version = 0.3.1 + +[bumpversion:file:encode.go] + diff --git a/vendor/github.com/mozillazg/go-httpheader/.gitignore
b/vendor/github.com/mozillazg/go-httpheader/.gitignore new file mode 100644 index 0000000..098e254 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +dist/ +cover.html +cover.out diff --git a/vendor/github.com/mozillazg/go-httpheader/CHANGELOG.md b/vendor/github.com/mozillazg/go-httpheader/CHANGELOG.md new file mode 100644 index 0000000..a559008 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +## [0.3.1] (2022-04-09) + +* fix Decode: don't fill value for struct fields that don't exist in header + + +## [0.3.0] (2021-01-24) + +* add `func Decode(header http.Header, v interface{}) error` to support decoding headers into struct + +## [0.2.1] (2018-11-03) + +* add go.mod file to identify as a module + + +## [0.2.0] (2017-06-24) + +* support http.Header field. + + +## 0.1.0 (2017-06-10) + +* Initial Release + +[0.2.0]: https://github.com/mozillazg/go-httpheader/compare/v0.1.0...v0.2.0 +[0.2.1]: https://github.com/mozillazg/go-httpheader/compare/v0.2.0...v0.2.1 +[0.3.0]: https://github.com/mozillazg/go-httpheader/compare/v0.2.1...v0.3.0 +[0.3.1]: https://github.com/mozillazg/go-httpheader/compare/v0.3.0...v0.3.1 diff --git a/vendor/github.com/mozillazg/go-httpheader/LICENSE b/vendor/github.com/mozillazg/go-httpheader/LICENSE new file mode 100644 index 0000000..8ff7942 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 mozillazg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mozillazg/go-httpheader/Makefile b/vendor/github.com/mozillazg/go-httpheader/Makefile new file mode 100644 index 0000000..7ab8078 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/Makefile @@ -0,0 +1,15 @@ +help: + @echo "test run test" + @echo "lint run lint" + +.PHONY: test +test: + go test -race -v -cover -coverprofile cover.out + go tool cover -html=cover.out -o cover.html + +.PHONY: lint +lint: + gofmt -s -w . + goimports -w . + golint .
+ go vet diff --git a/vendor/github.com/mozillazg/go-httpheader/README.md b/vendor/github.com/mozillazg/go-httpheader/README.md new file mode 100644 index 0000000..dc81728 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/README.md @@ -0,0 +1,67 @@ +# go-httpheader + +go-httpheader is a Go library for encoding structs into Header fields. + +[![Build Status](https://github.com/mozillazg/go-httpheader/workflows/CI/badge.svg?branch=master)](https://github.com/mozillazg/go-httpheader/actions) +[![Coverage Status](https://coveralls.io/repos/github/mozillazg/go-httpheader/badge.svg?branch=master)](https://coveralls.io/github/mozillazg/go-httpheader?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mozillazg/go-httpheader)](https://goreportcard.com/report/github.com/mozillazg/go-httpheader) +[![GoDoc](https://godoc.org/github.com/mozillazg/go-httpheader?status.svg)](https://godoc.org/github.com/mozillazg/go-httpheader) + +## install + +`go get -u github.com/mozillazg/go-httpheader` + + +## usage + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/mozillazg/go-httpheader" +) + +type Options struct { + hide string + ContentType string `header:"Content-Type"` + Length int + XArray []string `header:"X-Array"` + TestHide string `header:"-"` + IgnoreEmpty string `header:"X-Empty,omitempty"` + IgnoreEmptyN string `header:"X-Empty-N,omitempty"` + CustomHeader http.Header +} + +func main() { + opt := Options{ + hide: "hide", + ContentType: "application/json", + Length: 2, + XArray: []string{"test1", "test2"}, + TestHide: "hide", + IgnoreEmptyN: "n", + CustomHeader: http.Header{ + "X-Test-1": []string{"233"}, + "X-Test-2": []string{"666"}, + }, + } + h, _ := httpheader.Header(opt) + fmt.Printf("%#v", h) + // h: + // http.Header{ + // "X-Test-1": []string{"233"}, + // "X-Test-2": []string{"666"}, + // "Content-Type": []string{"application/json"}, + // "Length": []string{"2"}, + // "X-Array": []string{"test1", "test2"}, + // "X-Empty-N": []string{"n"}, + // } + + // decode + var decode Options + httpheader.Decode(h, &decode) +} +``` diff --git a/vendor/github.com/mozillazg/go-httpheader/decode.go b/vendor/github.com/mozillazg/go-httpheader/decode.go new file mode 100644 index 0000000..7a9797e --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/decode.go @@ -0,0 +1,267 @@ +package httpheader + +import ( + "fmt" + "net/http" + "net/textproto" + "reflect" + "strconv" + "time" +) + +// Decoder is an interface implemented by any type that wishes to decode +// itself from Header fields in a non-standard way. 
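+// +// A sketch of one possible implementation (the CommaList type is hypothetical +// and assumes the strings package is imported): +// +// type CommaList []string +// +// func (c *CommaList) DecodeHeader(header http.Header, key string) error { +// if v := header.Get(key); v != "" { +// *c = strings.Split(v, ",") +// } +// return nil +// }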
+type Decoder interface { + DecodeHeader(header http.Header, key string) error +} + +// Decode expects to be passed an http.Header and a struct, and parses +// header into the struct recursively using the same rules as Header (see above). +func Decode(header http.Header, v interface{}) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr || val.IsNil() { + return fmt.Errorf("v should be a pointer and should not be nil") + } + + for val.Kind() == reflect.Ptr { + val = val.Elem() + } + + if val.Kind() != reflect.Struct { + return fmt.Errorf("v is not a struct %+v", val.Kind()) + } + return parseValue(header, val) +} + +func parseValue(header http.Header, val reflect.Value) error { + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get(tagName) + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + continue + } + name = sf.Name + } + + if opts.Contains("omitempty") && header.Get(name) == "" { + continue + } + + // Decoder interface + addr := sv + if addr.Kind() != reflect.Ptr && addr.Type().Name() != "" && addr.CanAddr() { + addr = addr.Addr() + } + if addr.Type().NumMethod() > 0 && addr.CanInterface() { + if m, ok := addr.Interface().(Decoder); ok { + if err := m.DecodeHeader(header, name); err != nil { + return err + } + continue + } + } + + if sv.Kind() == reflect.Ptr { + valArr, exist := headerValues(header, name) + if !exist { + continue + } + ve := reflect.New(sv.Type().Elem()) + if err := fillValues(ve, opts, valArr); err != nil { + return err + } + sv.Set(ve) + continue + } + + if sv.Type() == timeType { + valArr, exist := headerValues(header, name) + if !exist { + continue + } + if err := fillValues(sv, opts, valArr); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Struct { + if err := parseValue(header, sv); err != nil { + return err + } + continue + } + + valArr, exist := headerValues(header, name) + if !exist { + continue + } + if err := fillValues(sv, opts, valArr); err != nil { + return err + } + } + return nil +} + +func fillValues(sv reflect.Value, opts tagOptions, valArr []string) error { + var err error + var value string + if len(valArr) > 0 { + value = valArr[0] + } + for sv.Kind() == reflect.Ptr { + sv = sv.Elem() + } + + switch sv.Kind() { + case reflect.Bool: + var v bool + if opts.Contains("int") { + v = value != "0" + } else { + v = value != "false" + } + sv.SetBool(v) + return nil + case reflect.String: + sv.SetString(value) + return nil + case reflect.Uint, reflect.Uint64: + var v uint64 + if v, err = strconv.ParseUint(value, 10, 64); err != nil { + return err + } + sv.SetUint(v) + return nil + case reflect.Uint8: + var v uint64 + if v, err = strconv.ParseUint(value, 10, 8); err != nil { + return err + } + sv.SetUint(v) + return nil + case reflect.Uint16: + var v uint64 + if v, err = strconv.ParseUint(value, 10, 16); err != nil { + return err + } + sv.SetUint(v) + return nil + case reflect.Uint32: + var v uint64 + if v, err = strconv.ParseUint(value, 10, 32); err != nil { + return err + } + sv.SetUint(v) + return nil + case reflect.Int, reflect.Int64: + var v int64 + if v, err = strconv.ParseInt(value, 10, 64); err != nil { + return err + } + sv.SetInt(v) + return nil + case reflect.Int8: + var v int64 + if v, err = strconv.ParseInt(value, 10, 8); err != nil { + return err + } + sv.SetInt(v) + return nil + case
reflect.Int16: + var v int64 + if v, err = strconv.ParseInt(value, 10, 16); err != nil { + return err + } + sv.SetInt(v) + return nil + case reflect.Int32: + var v int64 + if v, err = strconv.ParseInt(value, 10, 32); err != nil { + return err + } + sv.SetInt(v) + return nil + case reflect.Float32: + var v float64 + if v, err = strconv.ParseFloat(value, 32); err != nil { + return err + } + sv.SetFloat(v) + return nil + case reflect.Float64: + var v float64 + if v, err = strconv.ParseFloat(value, 64); err != nil { + return err + } + sv.SetFloat(v) + return nil + case reflect.Slice: + v := reflect.MakeSlice(sv.Type(), len(valArr), len(valArr)) + for i, s := range valArr { + eleV := reflect.New(sv.Type().Elem()).Elem() + if err := fillValues(eleV, opts, []string{s}); err != nil { + return err + } + v.Index(i).Set(eleV) + } + sv.Set(v) + return nil + case reflect.Array: + v := reflect.Indirect(reflect.New(reflect.ArrayOf(sv.Len(), sv.Type().Elem()))) + length := len(valArr) + if sv.Len() < length { + length = sv.Len() + } + for i := 0; i < length; i++ { + eleV := reflect.New(sv.Type().Elem()).Elem() + if err := fillValues(eleV, opts, []string{valArr[i]}); err != nil { + return err + } + v.Index(i).Set(eleV) + } + sv.Set(v) + return nil + case reflect.Interface: + v := reflect.ValueOf(valArr) + sv.Set(v) + return nil + } + + if sv.Type() == timeType { + var v time.Time + if opts.Contains("unix") { + u, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + v = time.Unix(u, 0).UTC() + } else { + v, err = time.Parse(http.TimeFormat, value) + if err != nil { + return err + } + } + sv.Set(reflect.ValueOf(v)) + return nil + } + + // sv.Set(reflect.ValueOf(value)) + return nil +} + +func headerValues(h http.Header, key string) ([]string, bool) { + vs, ok := textproto.MIMEHeader(h)[textproto.CanonicalMIMEHeaderKey(key)] + return vs, ok +} diff --git a/vendor/github.com/mozillazg/go-httpheader/encode.go b/vendor/github.com/mozillazg/go-httpheader/encode.go new file mode 100644 index 0000000..98c03f5 --- /dev/null +++ b/vendor/github.com/mozillazg/go-httpheader/encode.go @@ -0,0 +1,297 @@ +// Package httpheader implements encoding of structs into http.Header fields. +// +// As a simple example: +// +// type Options struct { +// ContentType string `header:"Content-Type"` +// Length int +// } +// +// opt := Options{"application/json", 2} +// h, _ := httpheader.Header(opt) +// fmt.Printf("%#v", h) +// // will output: +// // http.Header{"Content-Type":[]string{"application/json"},"Length":[]string{"2"}} +// +// The exact mapping between Go values and http.Header is described in the +// documentation for the Header() function. +package httpheader + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + "time" +) + +const tagName = "header" + +// Version ... +const Version = "0.3.1" + +var timeType = reflect.TypeOf(time.Time{}) +var headerType = reflect.TypeOf(http.Header{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into Header fields in a non-standard way. +type Encoder interface { + EncodeHeader(key string, v *http.Header) error +} + +// Header returns the http.Header encoding of v. +// +// Header expects to be passed a struct, and traverses it recursively using the +// following encoding rules. 
+// + // Each exported struct field is encoded as a Header field unless + // + // - the field's tag is "-", or + // - the field is empty and its tag specifies the "omitempty" option + // + // The empty values are false, 0, any nil pointer or interface value, any array, + // slice, map, or string of length zero, and any time.Time that returns true + // for IsZero(). + // + // The Header field name defaults to the struct field name but can be + // specified in the struct field's tag value. The "header" key in the struct + // field's tag value is the key name, followed by an optional comma and + // options. For example: + // + // // Field is ignored by this package. + // Field int `header:"-"` + // + // // Field appears as Header field "X-Name". + // Field int `header:"X-Name"` + // + // // Field appears as Header field "X-Name" and the field is omitted if + // // its value is empty. + // Field int `header:"X-Name,omitempty"` + // + // // Field appears as Header field "Field" (the default), but the field + // // is skipped if empty. Note the leading comma. + // Field int `header:",omitempty"` + // + // For encoding individual field values, the following type-dependent rules + // apply: + // + // Boolean values default to encoding as the strings "true" or "false". + // Including the "int" option signals that the field should be encoded as the + // strings "1" or "0". + // + // time.Time values default to encoding as RFC1123 ("Mon, 02 Jan 2006 15:04:05 GMT") + // timestamps. Including the "unix" option signals that the field should be + // encoded as a Unix time (see time.Unix()). + // + // Slice and Array values default to encoding as multiple Header values of the + // same name. For example: + // X-Name: []string{"Tom", "Jim"}, etc. + // + // http.Header values will be used to extend the Header fields. + // + // Anonymous struct fields are usually encoded as if their inner exported + // fields were fields in the outer struct, subject to the standard Go + // visibility rules. An anonymous struct field with a name given in its Header + // tag is treated as having that name, rather than being anonymous. + // + // Non-nil pointer values are encoded as the value pointed to. + // + // All other values are encoded using their default string representation. + // + // Multiple fields that encode to the same Header field name will be included + // as multiple Header values of the same name. +func Header(v interface{}) (http.Header, error) { + h := make(http.Header) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return h, nil + } + val = val.Elem() + } + + if v == nil { + return h, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("httpheader: Header() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(h, val) + return h, err +} + +// reflectValue populates the header fields from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Header function documentation) breadth-first.
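+// +// As an illustration of those rules (the Req type is hypothetical): +// +// type Req struct { +// When time.Time `header:"X-When,unix"` +// Debug bool `header:"X-Debug,int,omitempty"` +// } +// +// encodes When as a Unix timestamp, encodes Debug as "1" or "0", and omits +// X-Debug entirely while Debug is false (the empty value for bool).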
+func reflectValue(header http.Header, val reflect.Value) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get(tagName) + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, sv) + continue + } + + name = sf.Name + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + if !reflect.Indirect(sv).IsValid() { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeHeader(name, &header); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + for i := 0; i < sv.Len(); i++ { + k := name + header.Add(k, valueString(sv.Index(i), opts)) + } + continue + } + + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Type() == timeType { + header.Add(name, valueString(sv, opts)) + continue + } + if sv.Type() == headerType { + h := sv.Interface().(http.Header) + for k, vs := range h { + for _, v := range vs { + header.Add(k, v) + } + } + continue + } + + if sv.Kind() == reflect.Struct { + if err := reflectValue(header, sv); err != nil { + return err + } + continue + } + + header.Add(name, valueString(sv, opts)) + } + + for _, f := range embedded { + if err := reflectValue(header, f); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + return t.Format(http.TimeFormat) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + if v.Type() == timeType { + return v.Interface().(time.Time).IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "header" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's header tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. 
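+// +// For example (illustrative values): parseTag("X-Name,omitempty,int") returns +// the name "X-Name" and tagOptions{"omitempty", "int"}, for which +// Contains("int") reports true and Contains("unix") reports false.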
+func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} + +// Encode is an alias of the Header function +func Encode(v interface{}) (http.Header, error) { + return Header(v) +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/.gitignore b/vendor/github.com/qiniu/go-sdk/v7/.gitignore new file mode 100644 index 0000000..345a84f --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/.gitignore @@ -0,0 +1,35 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.dylib +*.dll + +# Folders +_obj +_test +.idea +.vscode +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# coverage file +coverage.txt + +# Go workspace file +go.work diff --git a/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md b/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md new file mode 100644 index 0000000..b0f9f93 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md @@ -0,0 +1,56 @@ +# Changelog + +## 7.13.0 +* Object storage: management APIs now send the [X-Qiniu-Date](https://developer.qiniu.com/kodo/3924/common-request-headers) header (the time the request was generated) + +## 7.12.1 +* Object storage: the Stat API can now return object metadata + +## 7.12.0 +* Object storage: added support for the [Deep Archive storage class](https://developer.qiniu.com/kodo/3956/kodo-category#deep_archive) +* Object storage: full support for Qiniu signing + +## 7.11.1 +* Reduced the pod's current memory working set usage in container environments + +## 7.11.0 +* Added server-side management capabilities for live streaming, including APIs for live space management, domain management, push/pull stream URL management, live stream management, and statistics queries + + +## 7.10.1 +* Reduced memory usage of multipart uploads +* Fixed several known issues + +## 7.10.0 +* Added the PutWithoutKeyAndSize API, which allows uploads without specifying size and key +* Fixed a known UcQuery parsing issue +## 7.9.8 +* Added missing fields to the object metadata query response + +## 7.9.7 +* Fixed a known issue with the form uploader (FormUploader) during internal retries + +## 7.9.6 +* When a storage service host must be specified, both a bare domain and a domain with an access protocol are now accepted + +## 7.9.5 +Fixed several minor known issues: +* Support specifying the bucket management domain; defaults to the public cloud address +* Support downloading files with escape-encoded names +* Improved handling of some error cases + +## 7.9.4 +* Accept upload tokens in different formats + +## 7.9.3 +* Fixed expiration time accumulation when reusing an upload token + +## 7.9.2 +* The UploadPartInfo struct is now exported and can be used to customize the multipart upload process +* Kept backward compatibility for the upload API extra.UpHost parameter + +## 7.9.1 +* Fixed a known issue in the buckets API + +## 7.9.0 +* Migrated from github.com/qiniu/api.v7 to github.com/qiniu/go-sdk diff --git a/vendor/github.com/qiniu/go-sdk/v7/LICENSE b/vendor/github.com/qiniu/go-sdk/v7/LICENSE new file mode 100644 index 0000000..a39863f --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Qiniu, Ltd. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
diff --git a/vendor/github.com/qiniu/go-sdk/v7/Makefile b/vendor/github.com/qiniu/go-sdk/v7/Makefile new file mode 100644 index 0000000..47a0680 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/Makefile @@ -0,0 +1,5 @@ +test: + go test -tags='unit integration' -failfast -v -timeout 350m -coverprofile=coverage.txt `go list ./... | egrep -v 'examples|sms'` + +unittest: + go test -tags=unit -failfast -v -coverprofile=coverage.txt `go list ./... | egrep -v 'examples|sms'` diff --git a/vendor/github.com/qiniu/go-sdk/v7/OWNERS b/vendor/github.com/qiniu/go-sdk/v7/OWNERS new file mode 100644 index 0000000..6a3637a --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/OWNERS @@ -0,0 +1,14 @@ +# see the OWNERS docs at https://go.k8s.io/owners + +approvers: +- CarlJi +- bachue +- YangSen-qn +- Mei-Zhao +- sxci +reviewers: +- CarlJi +- bachue +- YangSen-qn +- Mei-Zhao +- sxci diff --git a/vendor/github.com/qiniu/go-sdk/v7/README.md b/vendor/github.com/qiniu/go-sdk/v7/README.md new file mode 100644 index 0000000..49c2038 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/README.md @@ -0,0 +1,44 @@ +github.com/qiniu/go-sdk +=============== + +[![LICENSE](https://img.shields.io/github/license/qiniu/go-sdk.svg)](https://github.com/qiniu/go-sdk/blob/master/LICENSE) +[![Build Status](https://github.com/qiniu/go-sdk/workflows/Run%20Test%20Cases/badge.svg)](https://github.com/qiniu/go-sdk/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/qiniu/go-sdk)](https://goreportcard.com/report/github.com/qiniu/go-sdk) +[![GitHub release](https://img.shields.io/github/v/tag/qiniu/go-sdk.svg?label=release)](https://github.com/qiniu/go-sdk/releases) +[![codecov](https://codecov.io/gh/qiniu/go-sdk/branch/master/graph/badge.svg)](https://codecov.io/gh/qiniu/go-sdk) +[![GoDoc](https://godoc.org/github.com/qiniu/go-sdk?status.svg)](https://godoc.org/github.com/qiniu/go-sdk) + +[![Qiniu Logo](http://open.qiniudn.com/logo.png)](http://qiniu.com/) + +# Download + +## Using Go mod (recommended) + +Add this line to the `go.mod` file of your project: + +``` +require github.com/qiniu/go-sdk/v7 v7.13.0 +``` + +and reference the Qiniu Go SDK in your project with `"github.com/qiniu/go-sdk/v7"`. + +For example: + +```go +import ( + "github.com/qiniu/go-sdk/v7/auth" + "github.com/qiniu/go-sdk/v7/storage" +) +``` + +# Go version requirement + +Requires go1.10 or later. + +# Documentation + +[Qiniu SDK documentation site](https://developer.qiniu.com/kodo/sdk/1238/go) or the [project wiki](https://github.com/qiniu/go-sdk/wiki) + +# Examples + +[Sample code](https://github.com/qiniu/go-sdk/tree/master/examples) diff --git a/vendor/github.com/qiniu/go-sdk/v7/auth/context.go b/vendor/github.com/qiniu/go-sdk/v7/auth/context.go new file mode 100644 index 0000000..217722b --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/auth/context.go @@ -0,0 +1,36 @@ +package auth + +import ( + "context" +) + +// macContextKey is the context key for the user's credentials. +// Keys in a context.Context should not be plain strings, which could cause naming collisions. +type macContextKey struct{} + +// tokenTypeKey is the context key for the signing algorithm (token) type. +type tokenTypeKey struct{} + +// WithCredentials returns a context carrying the credentials. +func WithCredentials(ctx context.Context, cred *Credentials) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, macContextKey{}, cred) +} + +// WithCredentialsType returns a context carrying both the credentials and the token type. +func WithCredentialsType(ctx context.Context, cred *Credentials, t TokenType) context.Context { + ctx = WithCredentials(ctx, cred) + return context.WithValue(ctx, tokenTypeKey{}, t) +} + +// CredentialsFromContext retrieves the credentials from the context. +func CredentialsFromContext(ctx context.Context) (cred *Credentials, t TokenType, ok bool) { + cred, ok =
+
+// CredentialsFromContext retrieves the credentials and token type from the context
+func CredentialsFromContext(ctx context.Context) (cred *Credentials, t TokenType, ok bool) {
+	cred, ok = ctx.Value(macContextKey{}).(*Credentials)
+	t, yes := ctx.Value(tokenTypeKey{}).(TokenType)
+	if !yes {
+		t = TokenQBox
+	}
+	return
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/auth/credentials.go b/vendor/github.com/qiniu/go-sdk/v7/auth/credentials.go
new file mode 100644
index 0000000..3b814b8
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/auth/credentials.go
@@ -0,0 +1,222 @@
+package auth
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/textproto"
+	"sort"
+	"strings"
+
+	api "github.com/qiniu/go-sdk/v7"
+	"github.com/qiniu/go-sdk/v7/conf"
+)
+
+const (
+	IAMKeyLen    = 33
+	IAMKeyPrefix = "IAM-"
+)
+
+// Credentials is the Qiniu auth class, used to generate QBox, Qiniu and Upload signatures.
+// The AK/SK pair can be obtained from https://portal.qiniu.com/user/key
+type Credentials struct {
+	AccessKey string
+	SecretKey []byte
+}
+
+// New builds a Credentials object
+func New(accessKey, secretKey string) *Credentials {
+	return &Credentials{accessKey, []byte(secretKey)}
+}
+
+// Sign signs the data; typically used for private-bucket download URLs
+func (ath *Credentials) Sign(data []byte) (token string) {
+	h := hmac.New(sha1.New, ath.SecretKey)
+	h.Write(data)
+
+	sign := base64.URLEncoding.EncodeToString(h.Sum(nil))
+	return fmt.Sprintf("%s:%s", ath.AccessKey, sign)
+}
+
+// AddToken signs the request according to the token type t and adds the token to req
+func (ath *Credentials) AddToken(t TokenType, req *http.Request) error {
+	switch t {
+	case TokenQiniu:
+		token, sErr := ath.SignRequestV2(req)
+		if sErr != nil {
+			return sErr
+		}
+		req.Header.Add("Authorization", "Qiniu "+token)
+	default:
+		token, err := ath.SignRequest(req)
+		if err != nil {
+			return err
+		}
+		req.Header.Add("Authorization", "QBox "+token)
+	}
+	return nil
+}
+
+// SignWithData signs the data; typically used to generate upload tokens
+func (ath *Credentials) SignWithData(b []byte) (token string) {
+	encodedData := base64.URLEncoding.EncodeToString(b)
+	sign := ath.Sign([]byte(encodedData))
+	return fmt.Sprintf("%s:%s", sign, encodedData)
+}
+
+// IsIAMKey reports whether the AccessKey is an IAM key
+func (ath *Credentials) IsIAMKey() bool {
+	return len(ath.AccessKey) == IAMKeyLen*4/3 &&
+		strings.HasPrefix(ath.AccessKey, IAMKeyPrefix)
+}
+
+func collectData(req *http.Request) (data []byte, err error) {
+	u := req.URL
+	s := u.Path
+	if u.RawQuery != "" {
+		s += "?"
+		s += u.RawQuery
+	}
+	s += "\n"
+
+	data = []byte(s)
+	if incBody(req) {
+		s2, rErr := api.BytesFromRequest(req)
+		if rErr != nil {
+			err = rErr
+			return
+		}
+		req.Body = ioutil.NopCloser(bytes.NewReader(s2))
+		data = append(data, s2...)
+	}
+	return
+}
+
+type (
+	xQiniuHeaderItem struct {
+		HeaderName  string
+		HeaderValue string
+	}
+	xQiniuHeaders []xQiniuHeaderItem
+)
+
+func (headers xQiniuHeaders) Len() int {
+	return len(headers)
+}
+
+func (headers xQiniuHeaders) Less(i, j int) bool {
+	if headers[i].HeaderName < headers[j].HeaderName {
+		return true
+	} else if headers[i].HeaderName > headers[j].HeaderName {
+		return false
+	} else {
+		return headers[i].HeaderValue < headers[j].HeaderValue
+	}
+}
+
+func (headers xQiniuHeaders) Swap(i, j int) {
+	headers[i], headers[j] = headers[j], headers[i]
+}
+
+func collectDataV2(req *http.Request) (data []byte, err error) {
+	u := req.URL
+
+	// write method path?query
+	s := fmt.Sprintf("%s %s", req.Method, u.Path)
+	if u.RawQuery != "" {
+		s += "?"
+		s += u.RawQuery
+	}
+
+	// write host (and port)
+	s += "\nHost: " + req.Host + "\n"
+
+	// write content type
+	contentType := req.Header.Get("Content-Type")
+	if contentType == "" {
+		contentType = "application/x-www-form-urlencoded"
+		req.Header.Set("Content-Type", contentType)
+	}
+	s += fmt.Sprintf("Content-Type: %s\n", contentType)
+
+	xQiniuHeaders := make(xQiniuHeaders, 0, len(req.Header))
+	for headerName := range req.Header {
+		if len(headerName) > len("X-Qiniu-") && strings.HasPrefix(headerName, "X-Qiniu-") {
+			xQiniuHeaders = append(xQiniuHeaders, xQiniuHeaderItem{
+				HeaderName:  textproto.CanonicalMIMEHeaderKey(headerName),
+				HeaderValue: req.Header.Get(headerName),
+			})
+		}
+	}
+
+	if len(xQiniuHeaders) > 0 {
+		sort.Sort(xQiniuHeaders)
+		for _, xQiniuHeader := range xQiniuHeaders {
+			s += fmt.Sprintf("%s: %s\n", xQiniuHeader.HeaderName, xQiniuHeader.HeaderValue)
+		}
+	}
+	s += "\n"
+
+	data = []byte(s)
+	// write body
+	if incBodyV2(req) {
+		s2, rErr := api.BytesFromRequest(req)
+		if rErr != nil {
+			err = rErr
+			return
+		}
+		req.Body = ioutil.NopCloser(bytes.NewReader(s2))
+		data = append(data, s2...)
+	}
+	return
+}
+
+// SignRequest signs a request; typically used to generate management tokens
+func (ath *Credentials) SignRequest(req *http.Request) (token string, err error) {
+	data, err := collectData(req)
+	if err != nil {
+		return
+	}
+	token = ath.Sign(data)
+	return
+}
+
+// SignRequestV2 signs a request; typically used to generate the advanced (V2) management tokens
+func (ath *Credentials) SignRequestV2(req *http.Request) (token string, err error) {
+
+	data, err := collectDataV2(req)
+	if err != nil {
+		return
+	}
+	token = ath.Sign(data)
+	return
+}
+
+// incBody reports whether the request body should also be signed when generating a management token
+func incBody(req *http.Request) bool {
+	return req.Body != nil && req.Header.Get("Content-Type") == conf.CONTENT_TYPE_FORM
+}
+
+func incBodyV2(req *http.Request) bool {
+	contentType := req.Header.Get("Content-Type")
+	return req.Body != nil && (contentType == conf.CONTENT_TYPE_FORM || contentType == conf.CONTENT_TYPE_JSON)
+}
+
+// VerifyCallback verifies whether an upload callback request really comes from Qiniu
+func (ath *Credentials) VerifyCallback(req *http.Request) (bool, error) {
+	auth := req.Header.Get("Authorization")
+	if auth == "" {
+		return false, nil
+	}
+
+	token, err := ath.SignRequest(req)
+	if err != nil {
+		return false, err
+	}
+
+	return auth == "QBox "+token, nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/doc.go b/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/doc.go
new file mode 100644
index 0000000..aa1de15
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/doc.go
@@ -0,0 +1,2 @@
+// Package qbox provides the auth helpers this SDK needs
+package qbox
diff --git a/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/qbox_auth.go b/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/qbox_auth.go
new file mode 100644
index 0000000..c2be010
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/auth/qbox/qbox_auth.go
@@ -0,0 +1,28 @@
+package qbox
+
+import (
+	"github.com/qiniu/go-sdk/v7/auth"
+	"net/http"
+)
+
+type Mac = auth.Credentials
+
+// kept for backward compatibility
+func NewMac(accessKey, secretKey string) *Mac {
+	return auth.New(accessKey, secretKey)
+}
+
+// Sign is typically used to sign download tokens
+func Sign(mac *Mac, data []byte) string {
+	return mac.Sign(data)
+}
+
+// SignWithData is typically used to sign upload tokens
+func SignWithData(mac *Mac, data []byte) string {
+	return mac.SignWithData(data)
+}
+
+// VerifyCallback verifies whether an upload callback request really comes from Qiniu
+func VerifyCallback(mac *Mac, req *http.Request) (bool, error) {
+	return mac.VerifyCallback(req)
+}
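A sketch of verifying an upload callback on the business server with the qbox helpers above (the handler wiring and AK/SK are placeholders):

```go
package main

import (
	"net/http"

	"github.com/qiniu/go-sdk/v7/auth/qbox"
)

func callbackHandler(w http.ResponseWriter, req *http.Request) {
	mac := qbox.NewMac("your-access-key", "your-secret-key")
	ok, err := qbox.VerifyCallback(mac, req)
	if err != nil || !ok {
		// Missing or foreign signature: the request did not come from Qiniu.
		http.Error(w, "bad callback signature", http.StatusUnauthorized)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/qiniu/callback", callbackHandler)
	http.ListenAndServe(":8080", nil)
}
```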
diff --git a/vendor/github.com/qiniu/go-sdk/v7/auth/signer.go b/vendor/github.com/qiniu/go-sdk/v7/auth/signer.go
new file mode 100644
index 0000000..7b1dfef
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/auth/signer.go
@@ -0,0 +1,10 @@
+package auth
+
+// TokenType is the kind of Qiniu signing algorithm:
+// QBoxToken, QiniuToken, BearToken, QiniuMacToken
+type TokenType int
+
+const (
+	TokenQiniu TokenType = iota
+	TokenQBox
+)
diff --git a/vendor/github.com/qiniu/go-sdk/v7/client/client.go b/vendor/github.com/qiniu/go-sdk/v7/client/client.go
new file mode 100644
index 0000000..7368d5e
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/client/client.go
@@ -0,0 +1,381 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptrace"
+	"net/http/httputil"
+	"net/url"
+	"runtime"
+	"strings"
+
+	"github.com/qiniu/go-sdk/v7/auth"
+	"github.com/qiniu/go-sdk/v7/conf"
+	"github.com/qiniu/go-sdk/v7/internal/log"
+	"github.com/qiniu/go-sdk/v7/reqid"
+)
+
+var UserAgent = "Golang qiniu/client package"
+var DefaultClient = Client{&http.Client{Transport: http.DefaultTransport}}
+
+// used to print debug information
+var DebugMode = false
+var DeepDebugInfo = false
+
+// --------------------------------------------------------------------
+
+// Client sends HTTP requests to the Qiniu API servers
+type Client struct {
+	*http.Client
+}
+
+// TurnOnDebug turns on debug mode
+func TurnOnDebug() {
+	DebugMode = true
+}
+
+// userApp should be [A-Za-z0-9_\ \-\.]*
+func SetAppName(userApp string) error {
+	UserAgent = fmt.Sprintf(
+		"QiniuGo/%s (%s; %s; %s) %s", conf.Version, runtime.GOOS, runtime.GOARCH, userApp, runtime.Version())
+	return nil
+}
+
+// --------------------------------------------------------------------
+
+func newRequest(ctx context.Context, method, reqUrl string, headers http.Header, body io.Reader) (req *http.Request, err error) {
+	req, err = http.NewRequest(method, reqUrl, body)
+	if err != nil {
+		return
+	}
+
+	if headers == nil {
+		headers = http.Header{}
+	}
+
+	err = addDefaultHeader(headers)
+	if err != nil {
+		return
+	}
+
+	req.Header = headers
+	req = req.WithContext(ctx)
+
+	// check access token
+	mac, t, ok := auth.CredentialsFromContext(ctx)
+	if ok {
+		err = mac.AddToken(t, req)
+		if err != nil {
+			return
+		}
+	}
+	if DebugMode {
+		trace := &httptrace.ClientTrace{
+			GotConn: func(connInfo httptrace.GotConnInfo) {
+				remoteAddr := connInfo.Conn.RemoteAddr()
+				log.Debug(fmt.Sprintf("Network: %s, Remote ip:%s, URL: %s", remoteAddr.Network(), remoteAddr.String(), req.URL))
+			},
+		}
+		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
+		bs, bErr := httputil.DumpRequest(req, DeepDebugInfo)
+		if bErr != nil {
+			err = bErr
+			return
+		}
+		log.Debug(string(bs))
+	}
+	return
+}
+
+func (r Client) DoRequest(ctx context.Context, method, reqUrl string, headers http.Header) (resp *http.Response, err error) {
+	req, err := newRequest(ctx, method, reqUrl, headers, nil)
+	if err != nil {
+		return
+	}
+	return r.Do(ctx, req)
+}
+
+func (r Client) DoRequestWith(ctx context.Context, method, reqUrl string, headers http.Header, body io.Reader,
+	bodyLength int) (resp *http.Response, err error) {
+
+	req, err := newRequest(ctx, method, reqUrl, headers, body)
+	if err != nil {
+		return
+	}
+	req.ContentLength = int64(bodyLength)
+	return r.Do(ctx, req)
+}
+
+func (r Client) DoRequestWith64(ctx context.Context, method, reqUrl string, headers http.Header, body io.Reader,
+	bodyLength int64) (resp *http.Response, err error) {
+
+	req, err := newRequest(ctx, method, reqUrl, headers, body)
+	if err != nil {
+		return
+	}
+	req.ContentLength = bodyLength
+	return r.Do(ctx, req)
+}
+
+func (r Client) DoRequestWithBodyGetter(ctx
context.Context, method, reqUrl string, headers http.Header, body io.Reader,
+	getBody func() (io.ReadCloser, error), bodyLength int64) (resp *http.Response, err error) {
+
+	req, err := newRequest(ctx, method, reqUrl, headers, body)
+	if err != nil {
+		return
+	}
+	req.ContentLength = bodyLength
+	req.GetBody = getBody
+	return r.Do(ctx, req)
+}
+
+func (r Client) DoRequestWithForm(ctx context.Context, method, reqUrl string, headers http.Header,
+	data map[string][]string) (resp *http.Response, err error) {
+
+	if headers == nil {
+		headers = http.Header{}
+	}
+	headers.Add("Content-Type", "application/x-www-form-urlencoded")
+
+	requestData := url.Values(data).Encode()
+	if method == "GET" || method == "HEAD" || method == "DELETE" {
+		if strings.ContainsRune(reqUrl, '?') {
+			reqUrl += "&"
+		} else {
+			reqUrl += "?"
+		}
+		return r.DoRequest(ctx, method, reqUrl+requestData, headers)
+	}
+
+	return r.DoRequestWith(ctx, method, reqUrl, headers, strings.NewReader(requestData), len(requestData))
+}
+
+func (r Client) DoRequestWithJson(ctx context.Context, method, reqUrl string, headers http.Header,
+	data interface{}) (resp *http.Response, err error) {
+
+	reqBody, err := json.Marshal(data)
+	if err != nil {
+		return
+	}
+
+	if headers == nil {
+		headers = http.Header{}
+	}
+	headers.Add("Content-Type", "application/json")
+	return r.DoRequestWith(ctx, method, reqUrl, headers, bytes.NewReader(reqBody), len(reqBody))
+}
+
+func (r Client) Do(ctx context.Context, req *http.Request) (resp *http.Response, err error) {
+	reqctx := req.Context()
+
+	if reqId, ok := reqid.ReqidFromContext(ctx); ok {
+		req.Header.Set("X-Reqid", reqId)
+	} else if reqId, ok = reqid.ReqidFromContext(reqctx); ok {
+		req.Header.Set("X-Reqid", reqId)
+	}
+
+	if _, ok := req.Header["User-Agent"]; !ok {
+		req.Header.Set("User-Agent", UserAgent)
+	}
+
+	resp, err = r.Client.Do(req)
+	return
+}
+
+// --------------------------------------------------------------------
+
+type ErrorInfo struct {
+	Err   string `json:"error,omitempty"`
+	Key   string `json:"key,omitempty"`
+	Reqid string `json:"reqid,omitempty"`
+	Errno int    `json:"errno,omitempty"`
+	Code  int    `json:"code"`
+}
+
+func (r *ErrorInfo) ErrorDetail() string {
+
+	msg, _ := json.Marshal(r)
+	return string(msg)
+}
+
+func (r *ErrorInfo) Error() string {
+
+	return r.Err
+}
+
+func (r *ErrorInfo) RpcError() (code, errno int, key, err string) {
+
+	return r.Code, r.Errno, r.Key, r.Err
+}
+
+func (r *ErrorInfo) HttpCode() int {
+
+	return r.Code
+}
+
+// --------------------------------------------------------------------
+
+func parseError(e *ErrorInfo, r io.Reader) {
+
+	body, err1 := ioutil.ReadAll(r)
+	if err1 != nil {
+		e.Err = err1.Error()
+		return
+	}
+
+	var ret struct {
+		Err   string `json:"error"`
+		Key   string `json:"key"`
+		Errno int    `json:"errno"`
+	}
+	if decodeJsonFromData(body, &ret) == nil && ret.Err != "" {
+		// qiniu error msg style returns here
+		e.Err, e.Key, e.Errno = ret.Err, ret.Key, ret.Errno
+		return
+	}
+	e.Err = string(body)
+}
+
+func ResponseError(resp *http.Response) (err error) {
+
+	e := &ErrorInfo{
+		Reqid: resp.Header.Get("X-Reqid"),
+		Code:  resp.StatusCode,
+	}
+	if resp.StatusCode > 299 {
+		if resp.ContentLength != 0 {
+			ct, ok := resp.Header["Content-Type"]
+			if ok && strings.HasPrefix(ct[0], "application/json") {
+				parseError(e, resp.Body)
+			} else {
+				bs, rErr := ioutil.ReadAll(resp.Body)
+				if rErr != nil {
+					err = rErr
+				}
+				e.Err = strings.TrimRight(string(bs), "\n")
+			}
+		}
+	}
+	return e
+}
+
+func CallRet(ctx context.Context, ret
interface{}, resp *http.Response) (err error) {
+
+	defer func() {
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}()
+
+	if DebugMode {
+		bs, dErr := httputil.DumpResponse(resp, DeepDebugInfo)
+		if dErr != nil {
+			err = dErr
+			return
+		}
+		log.Debug(string(bs))
+	}
+	if resp.StatusCode/100 == 2 {
+		if ret != nil && resp.ContentLength != 0 {
+			err = decodeJsonFromReader(resp.Body, ret)
+			if err != nil {
+				return
+			}
+		}
+		return nil
+	}
+	return ResponseError(resp)
+}
+
+func (r Client) CallWithForm(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header,
+	param map[string][]string) (err error) {
+
+	resp, err := r.DoRequestWithForm(ctx, method, reqUrl, headers, param)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) CallWithJson(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header,
+	param interface{}) (err error) {
+
+	resp, err := r.DoRequestWithJson(ctx, method, reqUrl, headers, param)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) CallWith(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader,
+	bodyLength int) (err error) {
+
+	resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, body, bodyLength)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) CallWith64(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader,
+	bodyLength int64) (err error) {
+
+	resp, err := r.DoRequestWith64(ctx, method, reqUrl, headers, body, bodyLength)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) CallWithBodyGetter(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader,
+	getBody func() (io.ReadCloser, error), bodyLength int64) (err error) {
+
+	resp, err := r.DoRequestWithBodyGetter(ctx, method, reqUrl, headers, body, getBody, bodyLength)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) Call(ctx context.Context, ret interface{}, method, reqUrl string, headers http.Header) (err error) {
+
+	resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, nil, 0)
+	if err != nil {
+		return err
+	}
+	return CallRet(ctx, ret, resp)
+}
+
+func (r Client) CredentialedCallWithForm(ctx context.Context, cred *auth.Credentials, tokenType auth.TokenType, ret interface{},
+	method, reqUrl string, headers http.Header, param map[string][]string) error {
+	ctx = auth.WithCredentialsType(ctx, cred, tokenType)
+	return r.CallWithForm(ctx, ret, method, reqUrl, headers, param)
+}
+
+func (r Client) CredentialedCallWithJson(ctx context.Context, cred *auth.Credentials, tokenType auth.TokenType, ret interface{},
+	method, reqUrl string, headers http.Header, param interface{}) error {
+	ctx = auth.WithCredentialsType(ctx, cred, tokenType)
+	return r.CallWithJson(ctx, ret, method, reqUrl, headers, param)
+}
+
+func (r Client) CredentialedCallWith(ctx context.Context, cred *auth.Credentials, tokenType auth.TokenType, ret interface{},
+	method, reqUrl string, headers http.Header, body io.Reader, bodyLength int) error {
+	ctx = auth.WithCredentialsType(ctx, cred, tokenType)
+	return r.CallWith(ctx, ret, method, reqUrl, headers, body, bodyLength)
+}
+
+func (r Client) CredentialedCallWith64(ctx context.Context, cred *auth.Credentials, tokenType auth.TokenType, ret interface{},
+	method, reqUrl string, headers http.Header, body io.Reader,
bodyLength int64) error {
+	ctx = auth.WithCredentialsType(ctx, cred, tokenType)
+	return r.CallWith64(ctx, ret, method, reqUrl, headers, body, bodyLength)
+}
+
+func (r Client) CredentialedCall(ctx context.Context, cred *auth.Credentials, tokenType auth.TokenType, ret interface{},
+	method, reqUrl string, headers http.Header) error {
+	ctx = auth.WithCredentialsType(ctx, cred, tokenType)
+	return r.Call(ctx, ret, method, reqUrl, headers)
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/client/header.go b/vendor/github.com/qiniu/go-sdk/v7/client/header.go
new file mode 100644
index 0000000..bd657be
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/client/header.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"github.com/qiniu/go-sdk/v7/conf"
+	"net/http"
+	"time"
+)
+
+const (
+	RequestHeaderKeyXQiniuDate = "X-Qiniu-Date"
+)
+
+func addDefaultHeader(headers http.Header) error {
+	return addXQiniuDate(headers)
+}
+
+func addXQiniuDate(headers http.Header) error {
+	if conf.IsDisableQiniuTimestampSignature() {
+		return nil
+	}
+
+	timeString := time.Now().UTC().Format("20060102T150405Z")
+	headers.Set(RequestHeaderKeyXQiniuDate, timeString)
+	return nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/client/json_decode.go b/vendor/github.com/qiniu/go-sdk/v7/client/json_decode.go
new file mode 100644
index 0000000..6017896
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/client/json_decode.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+type jsonDecodeError struct {
+	original error
+	data     []byte
+}
+
+func (e jsonDecodeError) Error() string { return fmt.Sprintf("%s: %s", e.original.Error(), e.data) }
+
+func (e jsonDecodeError) Unwrap() error { return e.original }
+
+func decodeJsonFromData(data []byte, v interface{}) error {
+	err := json.Unmarshal(data, v)
+	if err != nil {
+		return jsonDecodeError{original: err, data: data}
+	}
+	return nil
+}
+
+func decodeJsonFromReader(reader io.Reader, v interface{}) error {
+	buf := new(bytes.Buffer)
+	t := io.TeeReader(reader, buf)
+	err := json.NewDecoder(t).Decode(v)
+	if err != nil {
+		return jsonDecodeError{original: err, data: buf.Bytes()}
+	}
+	return nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go b/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go
new file mode 100644
index 0000000..831e13a
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go
@@ -0,0 +1,23 @@
+package conf
+
+import (
+	"os"
+	"strings"
+)
+
+const Version = "7.13.0"
+
+const (
+	CONTENT_TYPE_JSON      = "application/json"
+	CONTENT_TYPE_FORM      = "application/x-www-form-urlencoded"
+	CONTENT_TYPE_OCTET     = "application/octet-stream"
+	CONTENT_TYPE_MULTIPART = "multipart/form-data"
+
+	disableQiniuTimestampSignatureEnvKey = "DISABLE_QINIU_TIMESTAMP_SIGNATURE"
+)
+
+func IsDisableQiniuTimestampSignature() bool {
+	value := os.Getenv(disableQiniuTimestampSignatureEnvKey)
+	value = strings.ToLower(value)
+	return value == "true" || value == "yes" || value == "y" || value == "1"
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/conf/doc.go b/vendor/github.com/qiniu/go-sdk/v7/conf/doc.go
new file mode 100644
index 0000000..0c5bd7b
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/conf/doc.go
@@ -0,0 +1,2 @@
+// Package conf provides a way to set the application name. The name is put into the UserAgent of API requests, which makes it easier to find the relevant logs when troubleshooting
+package conf
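The X-Qiniu-Date header added above can be switched off through an environment variable; a small sketch of the toggle:

```go
package main

import (
	"fmt"
	"os"

	"github.com/qiniu/go-sdk/v7/conf"
)

func main() {
	// Any of "true", "yes", "y" or "1" (case-insensitive) disables the X-Qiniu-Date header.
	os.Setenv("DISABLE_QINIU_TIMESTAMP_SIGNATURE", "true")
	fmt.Println(conf.IsDisableQiniuTimestampSignature()) // true
}
```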
diff --git a/vendor/github.com/qiniu/go-sdk/v7/doc.go b/vendor/github.com/qiniu/go-sdk/v7/doc.go
new file mode 100644
index 0000000..5614c6b
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/doc.go
@@ -0,0 +1,12 @@
+/*
+
+Package github.com/qiniu/go-sdk is v7.x of the Qiniu Go SDK.
+
+It mainly provides storage data upload, download and management, plus CDN-related features. Go >= 1.10.0 is required.
+
+The Go SDK consists of several packages:
+
+the auth package provides authentication, the conf package provides configuration, the cdn package provides CDN features, and the storage package provides storage features.
+
+*/
+package api
diff --git a/vendor/github.com/qiniu/go-sdk/v7/err.go b/vendor/github.com/qiniu/go-sdk/v7/err.go
new file mode 100644
index 0000000..4045ced
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/err.go
@@ -0,0 +1,22 @@
+package api
+
+// QINIU SDK error type
+
+// the Code field tells what kind of error occurred
+type QError struct {
+	Code    string
+	Message string
+}
+
+// Error implements the error interface
+func (e *QError) Error() string {
+	return e.Code + ": " + e.Message
+}
+
+// NewError returns a *QError
+func NewError(code, message string) *QError {
+	return &QError{
+		Code:    code,
+		Message: message,
+	}
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/internal/log/logger.go b/vendor/github.com/qiniu/go-sdk/v7/internal/log/logger.go
new file mode 100644
index 0000000..6cffaa3
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/internal/log/logger.go
@@ -0,0 +1,76 @@
+// Package log is used only by the SDK itself, for debugging, e.g. printing HTTP request and response information
+package log
+
+import (
+	"io"
+	"log"
+	"os"
+)
+
+type Logger struct {
+	*log.Logger
+	level LogLevel
+}
+
+// New returns a *Logger
+func New(out io.Writer, prefix string, flag int, level LogLevel) *Logger {
+	return &Logger{
+		Logger: log.New(out, prefix, flag),
+		level:  level,
+	}
+}
+
+var (
+	std  = New(os.Stdout, DebugPrefix, log.LstdFlags, LogDebug)
+	info = New(os.Stdout, InfoPrefix, log.LstdFlags, LogInfo)
+	warn = New(os.Stdout, WarnPrefix, log.LstdFlags, LogWarn)
+)
+
+type LogLevel int
+
+const (
+	// LogDebug is the debug level
+	LogDebug LogLevel = iota
+
+	// LogInfo is the info level
+	LogInfo
+
+	// LogWarn is the warn level
+	LogWarn
+)
+
+const (
+	InfoPrefix  = "[I] "
+	DebugPrefix = "[D] "
+	WarnPrefix  = "[W] "
+)
+
+func (l *Logger) Info(v ...interface{}) {
+	l.output(LogInfo, v...)
+}
+
+func (l *Logger) output(level LogLevel, v ...interface{}) {
+	if l.level <= level {
+		l.Logger.Println(v...)
+	}
+}
+
+func (l *Logger) Debug(v ...interface{}) {
+	l.output(LogDebug, v...)
+}
+
+func (l *Logger) Warn(v ...interface{}) {
+	l.output(LogWarn, v...)
+}
+
+func Debug(v ...interface{}) {
+	std.Debug(v...)
+}
+
+func Info(v ...interface{}) {
+	info.Info(v...)
+}
+
+func Warn(v ...interface{}) {
+	warn.Warn(v...)
+}
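The leveled output above only prints when the logger's level is at or below the message level. Because the package sits under internal/, user code cannot import it; inside the SDK, a sketch of the filtering (stdlog aliases the standard library log package, and the function name is illustrative only):

```go
package log // within the SDK's internal/log package, for illustration

import (
	stdlog "log"
	"os"
)

func exampleFiltering() {
	l := New(os.Stdout, WarnPrefix, stdlog.LstdFlags, LogWarn)
	l.Debug("suppressed") // LogDebug < LogWarn: not printed
	l.Warn("printed")     // emitted with the "[W] " prefix
}
```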
diff --git a/vendor/github.com/qiniu/go-sdk/v7/reqid/reqid.go b/vendor/github.com/qiniu/go-sdk/v7/reqid/reqid.go
new file mode 100644
index 0000000..0b9c764
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/reqid/reqid.go
@@ -0,0 +1,20 @@
+package reqid
+
+import (
+	"context"
+)
+
+type reqidKey struct{}
+
+// WithReqid stores the reqid in the context
+func WithReqid(ctx context.Context, reqid string) context.Context {
+	return context.WithValue(ctx, reqidKey{}, reqid)
+}
+
+// ReqidFromContext retrieves the reqid from the context
+func ReqidFromContext(ctx context.Context) (reqid string, ok bool) {
+	reqid, ok = ctx.Value(reqidKey{}).(string)
+	return
+}
+
+// --------------------------------------------------------------------
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/backward_compatible.go b/vendor/github.com/qiniu/go-sdk/v7/storage/backward_compatible.go
new file mode 100644
index 0000000..1f60cab
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/backward_compatible.go
@@ -0,0 +1,33 @@
+// rpc.go used to contain the client code; that part has moved to "github.com/qiniu/go-sdk/v7/client"
+// The contents of this file should no longer be used
+// The client is shared by all services, including kodo, cdn, dora, atlab and so on, and should not live under storage
+
+// This file keeps the variables, functions and so on that storage used to expose, for backward compatibility
+package storage
+
+import (
+	"fmt"
+	"github.com/qiniu/go-sdk/v7/client"
+	"github.com/qiniu/go-sdk/v7/conf"
+	"runtime"
+)
+
+var DefaultClient = client.DefaultClient
+var UserAgent = client.UserAgent
+
+type Client = client.Client
+type ErrorInfo = client.ErrorInfo
+
+var ResponseError = client.ResponseError
+var CallRet = client.CallRet
+
+// var SetAppName = client.SetAppName
+// SetAppName writes a global variable; if this package re-exported it as var SetAppName, what would actually be set is
+// the UserAgent of the client package, so the function is redefined here for compatibility
+
+// userApp should be [A-Za-z0-9_\ \-\.]*
+func SetAppName(userApp string) error {
+	UserAgent = fmt.Sprintf(
+		"QiniuGo/%s (%s; %s; %s) %s", conf.Version, runtime.GOOS, runtime.GOARCH, userApp, runtime.Version())
+	return nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/base64_upload.go b/vendor/github.com/qiniu/go-sdk/v7/storage/base64_upload.go
new file mode 100644
index 0000000..83191aa
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/base64_upload.go
@@ -0,0 +1,152 @@
+package storage
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/qiniu/go-sdk/v7/client"
+)
+
+// Base64Uploader represents a Base64 upload object
+type Base64Uploader struct {
+	client *client.Client
+	cfg    *Config
+}
+
+// NewBase64Uploader builds a Base64 uploader
+func NewBase64Uploader(cfg *Config) *Base64Uploader {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+
+	return &Base64Uploader{
+		client: &client.DefaultClient,
+		cfg:    cfg,
+	}
+}
+
+// NewBase64UploaderEx builds a Base64 uploader with a custom client
+func NewBase64UploaderEx(cfg *Config, clt *client.Client) *Base64Uploader {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+
+	if clt == nil {
+		clt = &client.DefaultClient
+	}
+
+	return &Base64Uploader{
+		client: clt,
+		cfg:    cfg,
+	}
+}
+
+// Base64PutExtra holds the optional extras for a Base64 upload
+type Base64PutExtra struct {
+	// Optional. Custom user parameters; keys must start with "x:" and are ignored otherwise.
+	Params map[string]string
+
+	// Optional. When "", the server determines the type automatically.
+	MimeType string
+}
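A minimal sketch of a Base64 upload built on the type above (how the uptoken is obtained, and the key, are placeholders):

```go
package main

import (
	"context"
	"encoding/base64"

	"github.com/qiniu/go-sdk/v7/storage"
)

func uploadBase64(uptoken string, raw []byte) (storage.PutRet, error) {
	uploader := storage.NewBase64Uploader(nil) // nil Config: defaults are used

	// The putb64 API expects the payload already Base64-encoded.
	data := []byte(base64.StdEncoding.EncodeToString(raw))

	var ret storage.PutRet
	err := uploader.Put(context.Background(), &ret, uptoken, "folder/pic.png", data, nil)
	return ret, err
}
```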
+
+// Put uploads a file in Base64 form
+//
+// ctx is the request context.
+// ret is the data returned after a successful upload. If the uptoken sets neither callbackUrl nor returnBody, the returned structure is PutRet.
+// uptoken is the upload token issued by the business server.
+// key is the access path of the uploaded file, e.g. "foo/bar.jpg". Note that we recommend key not start with '/'; an empty key is legal.
+// base64Data is the Base64 payload to upload, typically the Base64 encoding of image data
+// extra holds optional settings and may be nil; see Base64PutExtra for details.
+//
+func (p *Base64Uploader) Put(
+	ctx context.Context, ret interface{}, uptoken, key string, base64Data []byte, extra *Base64PutExtra) (err error) {
+	return p.put(ctx, ret, uptoken, key, true, base64Data, extra)
+}
+
+// PutWithoutKey uploads a file in Base64 form, using the content hash of the file as its name
+func (p *Base64Uploader) PutWithoutKey(
+	ctx context.Context, ret interface{}, uptoken string, base64Data []byte, extra *Base64PutExtra) (err error) {
+	return p.put(ctx, ret, uptoken, "", false, base64Data, extra)
+}
+
+func (p *Base64Uploader) put(
+	ctx context.Context, ret interface{}, uptoken, key string, hasKey bool, base64Data []byte, extra *Base64PutExtra) (err error) {
+	// get up host
+	ak, bucket, gErr := getAkBucketFromUploadToken(uptoken)
+	if gErr != nil {
+		err = gErr
+		return
+	}
+
+	var upHost string
+	upHost, err = p.upHost(ak, bucket)
+	if err != nil {
+		return
+	}
+
+	// set default extra
+	if extra == nil {
+		extra = &Base64PutExtra{}
+	}
+
+	// calc crc32
+	h := crc32.NewIEEE()
+	rawReader := base64.NewDecoder(base64.StdEncoding, bytes.NewReader(base64Data))
+	fsize, decodeErr := io.Copy(h, rawReader)
+	if decodeErr != nil {
+		err = fmt.Errorf("invalid base64 data, %s", decodeErr.Error())
+		return
+	}
+	fCrc32 := h.Sum32()
+
+	postPath := bytes.NewBufferString("/putb64")
+	// add fsize
+	postPath.WriteString("/")
+	postPath.WriteString(strconv.Itoa(int(fsize)))
+
+	// add key
+	if hasKey {
+		postPath.WriteString("/key/")
+		postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(key)))
+	}
+	// add mimeType
+	if extra.MimeType != "" {
+		postPath.WriteString("/mimeType/")
+		postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(extra.MimeType)))
+	}
+
+	// add crc32
+	postPath.WriteString("/crc32/")
+	postPath.WriteString(fmt.Sprintf("%d", fCrc32))
+
+	// add extra params
+	if len(extra.Params) > 0 {
+		for k, v := range extra.Params {
+			if strings.HasPrefix(k, "x:") && v != "" {
+				postPath.WriteString("/")
+				postPath.WriteString(k)
+				postPath.WriteString("/")
+				postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(v)))
+			}
+		}
+	}
+
+	postURL := fmt.Sprintf("%s%s", upHost, postPath.String())
+	headers := http.Header{}
+	headers.Add("Content-Type", "application/octet-stream")
+	headers.Add("Authorization", "UpToken "+uptoken)
+
+	return p.client.CallWith(ctx, ret, "POST", postURL, headers, bytes.NewReader(base64Data), len(base64Data))
+}
+
+func (p *Base64Uploader) upHost(ak, bucket string) (upHost string, err error) {
+	return getUpHost(p.cfg, ak, bucket)
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/bucket.go b/vendor/github.com/qiniu/go-sdk/v7/storage/bucket.go
new file mode 100644
index 0000000..1374f02
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/bucket.go
@@ -0,0 +1,999 @@
+package storage
+
+// TODO:
+// Every BucketManager method follows the same basic logic: set the Mac credentials, resolve the request host, send the HTTP request.
+// Later this could be abstracted into Request and APIOperation structs so that each method
+// does not have to repeat the same logic
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/qiniu/go-sdk/v7/auth"
+	"github.com/qiniu/go-sdk/v7/client"
+)
+
+// Default hosts for resource management
+const (
+	DefaultRsHost  = "rs.qiniu.com"
+	DefaultRsfHost = "rsf.qiniu.com"
+	DefaultAPIHost = "api.qiniu.com"
+	DefaultPubHost = "pu.qbox.me:10200"
+)
+
+// FileInfo holds the basic information of a file
+type FileInfo struct {
+
+	// HASH of the file content, computed by the hash algorithm.
+	Hash string `json:"hash"`
+
+	// Size of the resource content, in bytes.
+	Fsize int64 `json:"fsize"`
+
+	// Upload time, in units of 100 nanoseconds; dropping the lowest seven decimal digits yields a Unix timestamp.
+	PutTime int64 `json:"putTime"`
+
+	/**
+	 * Thaw status of an archive/deep-archive file; while such a file is frozen, the field is not returned.
+	 * 1 means thawing is in progress
+	 * 2 means thawing is complete
+	 */
+	RestoreStatus int `json:"restoreStatus"`
+
+	// MIME type of the resource.
+	MimeType string `json:"mimeType"`
+
+	/**
+	 * Storage class of the resource
+	 * 0 means standard storage
+	 * 1 means infrequent-access storage
+	 * 2 means archive storage
+	 * 3 means deep archive storage
+	 */
+	Type int `json:"type"`
+
+	/**
+	 * The endUser set when the file was uploaded
+	 */
+	EndUser string `json:"endUser"`
+
+	/**
+	 * Status of the file, i.e. the switch between the disabled and enabled states; see the file-status docs.
+	 * 0 means enabled
+	 * 1 means disabled
+	 */
+	Status int `json:"status"`
+
+	/**
+	 * MD5 of the file
+	 */
+	Md5 string `json:"md5"`
+
+	/**
+	 * Date on which the file expires and is deleted, int64, Unix timestamp; see the lifecycle-management docs for how the date is computed.
+	 * The field is returned only once an expiration has been set for the file (when set through a lifecycle rule, only files uploaded after
+	 * the feature went live and matching the rule carry the field; older files get it only after the expiration is set through the
+	 * modify-expiration or modify-lifecycle APIs; files whose expiration was set earlier still expire and are deleted normally,
+	 * the server simply does not return the field)
+	 *
+	 * For example, the value 1568736000 means the file will be deleted during the day of 2019/9/18.
+	 */
+	Expiration int64 `json:"expiration"`
+
+	/**
+	 * Date on which the file's lifecycle turns it into infrequent-access storage, int64, Unix timestamp; see the lifecycle-management docs.
+	 * The field is returned only once a transition to infrequent access has been set (when set through a lifecycle rule, only files uploaded
+	 * after the feature went live and matching the rule carry the field; older files get it only after the transition is set through the
+	 * modify-lifecycle API; transitions set earlier still run normally, the server simply does not return the field)
+	 *
+	 * For example, the value 1568736000 means the file turns into infrequent-access storage during the day of 2019/9/18.
+	 */
+	TransitionToIA int64 `json:"transitionToIA"`
+
+	/**
+	 * Date on which the file's lifecycle turns it into archive storage, int64, Unix timestamp; see the lifecycle-management docs.
+	 * The field is returned only once a transition to archive has been set (when set through a lifecycle rule, only files uploaded
+	 * after the feature went live and matching the rule carry the field; older files get it only after the transition is set through the
+	 * modify-lifecycle API; transitions set earlier still run normally, the server simply does not return the field)
+	 *
+	 * For example, the value 1568736000 means the file turns into archive storage during the day of 2019/9/18.
+	 */
+	TransitionToArchive int64 `json:"transitionToARCHIVE"`
+
+	/**
+	 * Date on which the file's lifecycle turns it into deep archive storage, int64, Unix timestamp; see the lifecycle-management docs.
+	 * The field is returned only once a transition to deep archive has been set (when set through a lifecycle rule, only files uploaded
+	 * after the feature went live and matching the rule carry the field; older files get it only after the transition is set through the
+	 * modify-lifecycle API; transitions set earlier still run normally, the server simply does not return the field)
+	 *
+	 * For example, the value 1568736000 means the file turns into deep archive storage during the day of 2019/9/18.
+	 */
+	TransitionToDeepArchive int64 `json:"transitionToDeepArchive"`
+
+	// Part information of the file; may be empty
+	Parts []int64 `json:"parts"`
+}
+
+func (f *FileInfo) String() string {
+	str := ""
+	str += fmt.Sprintf("Hash:     %s\n", f.Hash)
+	str += fmt.Sprintf("Fsize:    %d\n", f.Fsize)
+	str += fmt.Sprintf("PutTime:  %d\n", f.PutTime)
+	str += fmt.Sprintf("MimeType: %s\n", f.MimeType)
+	str += fmt.Sprintf("Type:     %d\n", f.Type)
+	str += fmt.Sprintf("Status:   %d\n", f.Status)
+	return str
+}
+
+// FetchRet is the return value of a fetch operation
+type FetchRet struct {
+	Hash     string `json:"hash"`
+	Fsize    int64  `json:"fsize"`
+	MimeType string `json:"mimeType"`
+	Key      string `json:"key"`
+}
+
+func (r *FetchRet) String() string {
+	str := ""
+	str += fmt.Sprintf("Key:      %s\n", r.Key)
+	str += fmt.Sprintf("Hash:     %s\n", r.Hash)
+	str += fmt.Sprintf("Fsize:    %d\n", r.Fsize)
+	str += fmt.Sprintf("MimeType: %s\n", r.MimeType)
+	return str
+}
+
+// BatchOpRet is the return value of a batch operation
+// Batch operations support stat, copy, delete, move, chgm, chtype and deleteAfterDays
+// stat fetches the basic information of a file: if the file exists, its info is returned; if not, an error is returned.
+// The other operations return a code on success; on failure an error message is returned as well, which can be used to pinpoint the problem.
+type BatchOpRet struct {
+	Code int `json:"code,omitempty"`
+	Data struct {
+		Hash     string `json:"hash"`
+		Fsize    int64  `json:"fsize"`
+		PutTime  int64  `json:"putTime"`
+		MimeType string `json:"mimeType"`
+		Type     int    `json:"type"`
+		Error    string `json:"error"`
+	} `json:"data,omitempty"`
+}
+
+// BucketManager provides operations for managing resources
+type BucketManager struct {
+	Client *client.Client
+	Mac    *auth.Credentials
+	Cfg    *Config
+}
+
+// NewBucketManager builds a new resource manager
+func NewBucketManager(mac *auth.Credentials, cfg *Config) *BucketManager {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	if cfg.CentralRsHost == "" {
+		cfg.CentralRsHost = DefaultRsHost
+	}
+
+	return &BucketManager{
+		Client: &client.DefaultClient,
+		Mac:    mac,
+		Cfg:    cfg,
+	}
+}
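A short sketch of constructing a manager and fetching file info (the bucket, key and AK/SK values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/qiniu/go-sdk/v7/auth"
	"github.com/qiniu/go-sdk/v7/storage"
)

func main() {
	mac := auth.New("your-access-key", "your-secret-key")
	bm := storage.NewBucketManager(mac, &storage.Config{UseHTTPS: true})

	info, err := bm.Stat("my-bucket", "folder/pic.png")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(info.String())
}
```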
+
+// NewBucketManagerEx builds a new resource manager with a custom client
+func NewBucketManagerEx(mac *auth.Credentials, cfg *Config, clt *client.Client) *BucketManager {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+
+	if clt == nil {
+		clt = &client.DefaultClient
+	}
+	if cfg.CentralRsHost == "" {
+		cfg.CentralRsHost = DefaultRsHost
+	}
+
+	return &BucketManager{
+		Client: clt,
+		Mac:    mac,
+		Cfg:    cfg,
+	}
+}
+
+// UpdateObjectStatus changes the status of a file, disabling or enabling its accessibility
+
+// Request:
+//
+// POST /chstatus/<EncodedEntry>/status/<status>
+// status: 0 means enabled, 1 means disabled
+// Response (JSON):
+//
+// 200 OK
+// if parsing fails, 400 Bad Request {"error":"invalid argument"}
+// if the key is not valid UTF-8, 400 Bad Request {"error":"key must be utf8 encoding"}
+// if the file does not exist, status code 612 {"error":"no such file or directory"}
+// if the file is already in the requested state, 400 {"error":"already enabled"} or 400 {"error":"already disabled"}
+func (m *BucketManager) UpdateObjectStatus(bucketName string, key string, enable bool) error {
+	var status string
+	ee := EncodedEntry(bucketName, key)
+	if enable {
+		status = "0"
+	} else {
+		status = "1"
+	}
+	path := fmt.Sprintf("/chstatus/%s/status/%s", ee, status)
+
+	reqHost, reqErr := m.RsReqHost(bucketName)
+	if reqErr != nil {
+		return reqErr
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, path)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// CreateBucket creates a Qiniu bucket
+func (m *BucketManager) CreateBucket(bucketName string, regionID RegionID) error {
+	reqURL := fmt.Sprintf("%s/mkbucketv3/%s/region/%s", getUcHost(m.Cfg.UseHTTPS), bucketName, string(regionID))
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// Buckets lists the buckets; if shared is true, buckets the user is authorized to access are listed as well
+func (m *BucketManager) Buckets(shared bool) (buckets []string, err error) {
+	reqURL := fmt.Sprintf("%s/buckets?shared=%v", getUcHost(m.Cfg.UseHTTPS), shared)
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &buckets, "POST", reqURL, nil)
+	return
+}
+
+// DropBucket deletes a Qiniu bucket
+func (m *BucketManager) DropBucket(bucketName string) (err error) {
+	reqURL := fmt.Sprintf("%s/drop/%s", getUcHost(m.Cfg.UseHTTPS), bucketName)
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// Stat fetches the basic information of a file
+func (m *BucketManager) Stat(bucket, key string) (FileInfo, error) {
+	return m.StatWithOpts(bucket, key, nil)
+}
+
+type StatOpts struct {
+	NeedParts bool
+}
+
+// StatWithOpts fetches the basic information of a file, optionally including its part information
+func (m *BucketManager) StatWithOpts(bucket, key string, opt *StatOpts) (info FileInfo, err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIStat(bucket, key))
+	if opt != nil {
+		if opt.NeedParts {
+			reqURL += "?needparts=true"
+		}
+	}
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &info, "POST", reqURL, nil)
+	return
+}
+
+// Delete removes a file from a bucket
+func (m *BucketManager) Delete(bucket, key string) (err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIDelete(bucket, key))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
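Reusing the bm from the sketch above, Copy combined with Delete emulates a cross-bucket move; force=true overwrites an existing destination (bucket and key names are placeholders):

```go
func moveViaCopy(bm *storage.BucketManager) error {
	// Copy first, overwriting the destination if it exists (force=true)...
	if err := bm.Copy("src-bucket", "a.txt", "dst-bucket", "a.txt", true); err != nil {
		return err
	}
	// ...then remove the original.
	return bm.Delete("src-bucket", "a.txt")
}
```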
+
+// Copy creates a new copy of an existing file in a bucket
+func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
+	reqHost, reqErr := m.RsReqHost(srcBucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	reqURL := fmt.Sprintf("%s%s", reqHost, URICopy(srcBucket, srcKey, destBucket, destKey, force))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// Move moves a file to another bucket or renames it
+func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) {
+	reqHost, reqErr := m.RsReqHost(srcBucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIMove(srcBucket, srcKey, destBucket, destKey, force))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// ChangeMime updates the MimeType of a file
+func (m *BucketManager) ChangeMime(bucket, key, newMime string) (err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeMime(bucket, key, newMime))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// ChangeType updates the storage class of a file: 0 means standard, 1 infrequent access, 2 archive, 3 deep archive
+func (m *BucketManager) ChangeType(bucket, key string, fileType int) (err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeType(bucket, key, fileType))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// RestoreAr thaws an archived file; the thaw can be kept effective for 1 to 7 days, and the thaw task usually takes 1 to 5 minutes
+func (m *BucketManager) RestoreAr(bucket, key string, freezeAfterDays int) (err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIRestoreAr(bucket, key, freezeAfterDays))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// DeleteAfterDays updates the lifecycle of a file; setting days to 0 cancels scheduled deletion and keeps the file permanently
+func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error) {
+	reqHost, reqErr := m.RsReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	reqURL := fmt.Sprintf("%s%s", reqHost, URIDeleteAfterDays(bucket, key, days))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// Batch performs resource-management operations in bulk, supporting the stat, copy, move, delete, chgm, chtype and deleteAfterDays APIs
+func (m *BucketManager) Batch(operations []string) (batchOpRet []BatchOpRet, err error) {
+	if len(operations) > 1000 {
+		err = errors.New("batch operation count exceeds the limit of 1000")
+		return
+	}
+	scheme := "http://"
+	if m.Cfg.UseHTTPS {
+		scheme = "https://"
+	}
+	reqURL := fmt.Sprintf("%s%s/batch", scheme, m.Cfg.CentralRsHost)
+	params := map[string][]string{
+		"op": operations,
+	}
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, &batchOpRet, "POST", reqURL, nil, params)
+	return
+}
+
+// Fetch grabs a file from the given remote URL into a bucket and saves it under the given key
+func (m *BucketManager) Fetch(resURL, bucket, key string) (fetchRet FetchRet, err error) {
+	reqHost, rErr := m.IoReqHost(bucket)
+	if rErr != nil {
+		err = rErr
+		return
+	}
+	reqURL := fmt.Sprintf("%s%s", reqHost, uriFetch(resURL, bucket, key))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &fetchRet, "POST", reqURL, nil)
+	return
+}
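The exported URI helpers defined later in this file (URIStat, URIDelete and friends) produce the op strings Batch expects, up to 1000 per call; a sketch (bm is the manager from the earlier sketch, and fmt is assumed imported):

```go
func batchExample(bm *storage.BucketManager) error {
	ops := []string{
		storage.URIStat("my-bucket", "a.txt"),   // returns file info in Data
		storage.URIDelete("my-bucket", "b.txt"), // returns only a code
	}
	rets, err := bm.Batch(ops)
	if err != nil {
		return err
	}
	for _, r := range rets {
		fmt.Println(r.Code, r.Data.Error)
	}
	return nil
}
```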
m.Cfg.RsHost == "" { + reqHost, reqErr = m.RsHost(bucket) + if reqErr != nil { + err = reqErr + return + } + } else { + reqHost = m.Cfg.RsHost + } + if !strings.HasPrefix(reqHost, "http") { + reqHost = "http://" + reqHost + } + return +} + +func (m *BucketManager) ApiReqHost(bucket string) (reqHost string, err error) { + var reqErr error + + if m.Cfg.ApiHost == "" { + reqHost, reqErr = m.ApiHost(bucket) + if reqErr != nil { + err = reqErr + return + } + } else { + reqHost = m.Cfg.ApiHost + } + if !strings.HasPrefix(reqHost, "http") { + reqHost = "http://" + reqHost + } + return +} + +func (m *BucketManager) RsfReqHost(bucket string) (reqHost string, err error) { + var reqErr error + + if m.Cfg.RsfHost == "" { + reqHost, reqErr = m.RsfHost(bucket) + if reqErr != nil { + err = reqErr + return + } + } else { + reqHost = m.Cfg.RsfHost + } + if !strings.HasPrefix(reqHost, "http") { + reqHost = "http://" + reqHost + } + return +} + +func (m *BucketManager) IoReqHost(bucket string) (reqHost string, err error) { + var reqErr error + + if m.Cfg.IoHost == "" { + reqHost, reqErr = m.IovipHost(bucket) + if reqErr != nil { + err = reqErr + return + } + } else { + reqHost = m.Cfg.IoHost + } + if !strings.HasPrefix(reqHost, "http") { + reqHost = "http://" + reqHost + } + return +} + +// FetchWithoutKey 根据提供的远程资源链接来抓取一个文件到空间并以文件的内容hash作为文件名 +func (m *BucketManager) FetchWithoutKey(resURL, bucket string) (fetchRet FetchRet, err error) { + reqHost, rErr := m.IoReqHost(bucket) + if rErr != nil { + err = rErr + return + } + reqURL := fmt.Sprintf("%s%s", reqHost, uriFetchWithoutKey(resURL, bucket)) + err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &fetchRet, "POST", reqURL, nil) + return +} + +// DomainInfo 是绑定在存储空间上的域名的具体信息 +type DomainInfo struct { + Domain string `json:"domain"` + + // 存储空间名字 + Tbl string `json:"tbl"` + + // 用户UID + Owner int `json:"uid"` + Refresh bool `json:"refresh"` + Ctime int `json:"ctime"` + Utime int `json:"utime"` +} + +// ListBucketDomains 返回绑定在存储空间中的域名信息 +func (m *BucketManager) ListBucketDomains(bucket string) (info []DomainInfo, err error) { + reqHost, err := m.z0ApiHost() + if err != nil { + return + } + reqURL := fmt.Sprintf("%s/v7/domain/list?tbl=%s", reqHost, bucket) + err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &info, "GET", reqURL, nil) + return +} + +// Prefetch 用来同步镜像空间的资源和镜像源资源内容 +func (m *BucketManager) Prefetch(bucket, key string) (err error) { + reqHost, reqErr := m.IoReqHost(bucket) + if reqErr != nil { + err = reqErr + return + } + reqURL := fmt.Sprintf("%s%s", reqHost, uriPrefetch(bucket, key)) + err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) + return +} + +// SetImage 用来设置空间镜像源 +func (m *BucketManager) SetImage(siteURL, bucket string) (err error) { + reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriSetImage(siteURL, bucket)) + err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) + return +} + +// SetImageWithHost 用来设置空间镜像源,额外添加回源Host头部 +func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err error) { + reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, + uriSetImageWithHost(siteURL, bucket, host)) + err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) + return +} + +// UnsetImage 用来取消空间镜像源设置 +func (m *BucketManager) UnsetImage(bucket string) (err error) { + reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, 
+		uriUnsetImage(bucket))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return err
+}
+
+// ListFiles lists the files in a bucket. You may specify a key prefix, a directory delimiter, the marker from which the next
+// iteration resumes, and limit, the maximum number of entries returned per call, at most 1000.
+func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string,
+	limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) {
+	if limit <= 0 || limit > 1000 {
+		err = errors.New("invalid list limit, only allow [1, 1000]")
+		return
+	}
+
+	reqHost, reqErr := m.RsfReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	ret := listFilesRet{}
+	reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles(bucket, prefix, delimiter, marker, limit))
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &ret, "POST", reqURL, nil)
+	if err != nil {
+		return
+	}
+
+	commonPrefixes = ret.CommonPrefixes
+	nextMarker = ret.Marker
+	entries = ret.Items
+	if ret.Marker != "" {
+		hasNext = true
+	}
+
+	return
+}
+
+// ListBucket lists the files in a bucket, optionally filtered by key prefix and directory delimiter, streaming the entries back one by one.
+func (m *BucketManager) ListBucket(bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
+
+	ctx := auth.WithCredentialsType(context.Background(), m.Mac, auth.TokenQiniu)
+	reqHost, reqErr := m.RsfReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	// limit 0 ==> list all files
+	reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles2(bucket, prefix, delimiter, marker))
+	retCh, err = callChan(m.Client, ctx, "POST", reqURL, nil)
+	return
+}
+
+// ListBucketContext lists the files in a bucket, optionally filtered by key prefix and directory delimiter, streaming the entries back one by one.
+// The given context can be used to cancel the listing
+func (m *BucketManager) ListBucketContext(ctx context.Context, bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
+
+	ctx = auth.WithCredentialsType(ctx, m.Mac, auth.TokenQiniu)
+	reqHost, reqErr := m.RsfReqHost(bucket)
+	if reqErr != nil {
+		err = reqErr
+		return
+	}
+
+	// limit 0 ==> list all files
+	reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles2(bucket, prefix, delimiter, marker))
+	retCh, err = callChan(m.Client, ctx, "POST", reqURL, nil)
+	return
+}
+
+type AsyncFetchParam struct {
+	Url              string `json:"url"`
+	Host             string `json:"host,omitempty"`
+	Bucket           string `json:"bucket"`
+	Key              string `json:"key,omitempty"`
+	Md5              string `json:"md5,omitempty"`
+	Etag             string `json:"etag,omitempty"`
+	CallbackURL      string `json:"callbackurl,omitempty"`
+	CallbackBody     string `json:"callbackbody,omitempty"`
+	CallbackBodyType string `json:"callbackbodytype,omitempty"`
+	FileType         int    `json:"file_type,omitempty"`
+}
+
+type AsyncFetchRet struct {
+	Id   string `json:"id"`
+	Wait int    `json:"wait"`
+}
+
+func (m *BucketManager) AsyncFetch(param AsyncFetchParam) (ret AsyncFetchRet, err error) {
+
+	reqUrl, err := m.ApiReqHost(param.Bucket)
+	if err != nil {
+		return
+	}
+
+	reqUrl += "/sisyphus/fetch"
+
+	err = m.Client.CredentialedCallWithJson(context.Background(), m.Mac, auth.TokenQiniu, &ret, "POST", reqUrl, nil, param)
+	return
+}
+
+func (m *BucketManager) RsHost(bucket string) (rsHost string, err error) {
+	zone, err := m.Zone(bucket)
+	if err != nil {
+		return
+	}
+
+	rsHost = zone.GetRsHost(m.Cfg.UseHTTPS)
+	return
+}
+
+func (m *BucketManager) RsfHost(bucket string) (rsfHost string, err error) {
+	zone, err := m.Zone(bucket)
+	if err != nil {
+		return
+	}
+
+	rsfHost = zone.GetRsfHost(m.Cfg.UseHTTPS)
+	return
+}
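Since ListFiles caps limit at 1000, larger listings are paged by feeding nextMarker back in; a sketch (bm from the earlier sketch, fmt assumed imported):

```go
func listAll(bm *storage.BucketManager) error {
	marker := ""
	for {
		entries, _, nextMarker, hasNext, err := bm.ListFiles("my-bucket", "logs/", "", marker, 1000)
		if err != nil {
			return err
		}
		for _, e := range entries {
			fmt.Println(e.Key, e.Fsize)
		}
		if !hasNext {
			return nil
		}
		marker = nextMarker
	}
}
```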
+
+func (m *BucketManager) IovipHost(bucket string) (iovipHost string, err error) {
+	zone, err := m.Zone(bucket)
+	if err != nil {
+		return
+	}
+
+	iovipHost = zone.GetIoHost(m.Cfg.UseHTTPS)
+	return
+}
+
+func (m *BucketManager) ApiHost(bucket string) (apiHost string, err error) {
+	zone, err := m.Zone(bucket)
+	if err != nil {
+		return
+	}
+
+	apiHost = zone.GetApiHost(m.Cfg.UseHTTPS)
+	return
+}
+
+func (m *BucketManager) z0ApiHost() (apiHost string, err error) {
+	region, err := getRegionByRegionId("z0", m.Mac)
+	if err != nil {
+		return
+	}
+
+	apiHost = region.GetApiHost(m.Cfg.UseHTTPS)
+	return
+}
+
+func (m *BucketManager) Zone(bucket string) (z *Zone, err error) {
+
+	if m.Cfg.Zone != nil {
+		z = m.Cfg.Zone
+		return
+	}
+
+	z, err = GetZone(m.Mac.AccessKey, bucket)
+	return
+}
+
+// Helpers that build op commands; the exported ones can be used in Batch operations
+
+// URIStat builds the request command for the stat API
+func URIStat(bucket, key string) string {
+	return fmt.Sprintf("/stat/%s", EncodedEntry(bucket, key))
+}
+
+// URIDelete builds the request command for the delete API
+func URIDelete(bucket, key string) string {
+	return fmt.Sprintf("/delete/%s", EncodedEntry(bucket, key))
+}
+
+// URICopy builds the request command for the copy API
+func URICopy(srcBucket, srcKey, destBucket, destKey string, force bool) string {
+	return fmt.Sprintf("/copy/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
+		EncodedEntry(destBucket, destKey), force)
+}
+
+// URIMove builds the request command for the move API
+func URIMove(srcBucket, srcKey, destBucket, destKey string, force bool) string {
+	return fmt.Sprintf("/move/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey),
+		EncodedEntry(destBucket, destKey), force)
+}
+
+// URIDeleteAfterDays builds the request command for the deleteAfterDays API
+func URIDeleteAfterDays(bucket, key string, days int) string {
+	return fmt.Sprintf("/deleteAfterDays/%s/%d", EncodedEntry(bucket, key), days)
+}
+
+// URIChangeMime builds the request command for the chgm API
+func URIChangeMime(bucket, key, newMime string) string {
+	return fmt.Sprintf("/chgm/%s/mime/%s", EncodedEntry(bucket, key),
+		base64.URLEncoding.EncodeToString([]byte(newMime)))
+}
+
+// URIChangeType builds the request command for the chtype API
+func URIChangeType(bucket, key string, fileType int) string {
+	return fmt.Sprintf("/chtype/%s/type/%d", EncodedEntry(bucket, key), fileType)
+}
+
+// URIRestoreAr builds the request command for the restoreAr API
+func URIRestoreAr(bucket, key string, afterDay int) string {
+	return fmt.Sprintf("/restoreAr/%s/freezeAfterDays/%d", EncodedEntry(bucket, key), afterDay)
+}
+
+// Helpers that build op commands; the unexported ones cannot be used in Batch operations
+func uriFetch(resURL, bucket, key string) string {
+	return fmt.Sprintf("/fetch/%s/to/%s",
+		base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntry(bucket, key))
+}
+
+func uriFetchWithoutKey(resURL, bucket string) string {
+	return fmt.Sprintf("/fetch/%s/to/%s",
+		base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntryWithoutKey(bucket))
+}
+
+func uriPrefetch(bucket, key string) string {
+	return fmt.Sprintf("/prefetch/%s", EncodedEntry(bucket, key))
+}
+
+func uriSetImage(siteURL, bucket string) string {
+	return fmt.Sprintf("/image/%s/from/%s", bucket,
+		base64.URLEncoding.EncodeToString([]byte(siteURL)))
+}
+
+func uriSetImageWithHost(siteURL, bucket, host string) string {
+	return fmt.Sprintf("/image/%s/from/%s/host/%s", bucket,
+		base64.URLEncoding.EncodeToString([]byte(siteURL)),
+		base64.URLEncoding.EncodeToString([]byte(host)))
+}
+
+func uriUnsetImage(bucket string) string {
+	return fmt.Sprintf("/unimage/%s", bucket)
+}
+
+func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string {
+	query := make(url.Values)
+	query.Add("bucket", bucket)
+	if prefix != "" {
+		query.Add("prefix", prefix)
+	}
+	if delimiter != "" {
query.Add("delimiter", delimiter) + } + if marker != "" { + query.Add("marker", marker) + } + if limit > 0 { + query.Add("limit", strconv.FormatInt(int64(limit), 10)) + } + return fmt.Sprintf("/list?%s", query.Encode()) +} + +func uriListFiles2(bucket, prefix, delimiter, marker string) string { + query := make(url.Values) + query.Add("bucket", bucket) + if prefix != "" { + query.Add("prefix", prefix) + } + if delimiter != "" { + query.Add("delimiter", delimiter) + } + if marker != "" { + query.Add("marker", marker) + } + return fmt.Sprintf("/v2/list?%s", query.Encode()) +} + +// EncodedEntry 生成URL Safe Base64编码的 Entry +func EncodedEntry(bucket, key string) string { + entry := fmt.Sprintf("%s:%s", bucket, key) + return base64.URLEncoding.EncodeToString([]byte(entry)) +} + +// EncodedEntryWithoutKey 生成 key 为null的情况下 URL Safe Base64编码的Entry +func EncodedEntryWithoutKey(bucket string) string { + return base64.URLEncoding.EncodeToString([]byte(bucket)) +} + +// MakePublicURL 用来生成公开空间资源下载链接,注意该方法并不会对 key 进行 escape +func MakePublicURL(domain, key string) (finalUrl string) { + domain = strings.TrimRight(domain, "/") + srcUrl := fmt.Sprintf("%s/%s", domain, key) + srcUri, _ := url.Parse(srcUrl) + finalUrl = srcUri.String() + return +} + +// MakePublicURLv2 用来生成公开空间资源下载链接,并且该方法确保 key 将会被 escape +func MakePublicURLv2(domain, key string) string { + return MakePublicURLv2WithQuery(domain, key, nil) +} + +// MakePublicURLv2WithQuery 用来生成公开空间资源下载链接,并且该方法确保 key 将会被 escape,并在 URL 后追加经过编码的查询参数 +func MakePublicURLv2WithQuery(domain, key string, query url.Values) string { + var rawQuery string + if query != nil { + rawQuery = query.Encode() + } + return makePublicURLv2WithRawQuery(domain, key, rawQuery) +} + +// MakePublicURLv2WithQueryString 用来生成公开空间资源下载链接,并且该方法确保 key 将会被 escape,并在 URL 后直接追加查询参数 +func makePublicURLv2WithQueryString(domain, key, query string) string { + return makePublicURLv2WithRawQuery(domain, key, urlEncodeQuery(query)) +} + +func makePublicURLv2WithRawQuery(domain, key, rawQuery string) string { + domain = strings.TrimRight(domain, "/") + srcUrl := fmt.Sprintf("%s/%s", domain, urlEncodeQuery(key)) + if rawQuery != "" { + srcUrl += "?" 
+
+func makePublicURLv2WithRawQuery(domain, key, rawQuery string) string {
+	domain = strings.TrimRight(domain, "/")
+	srcUrl := fmt.Sprintf("%s/%s", domain, urlEncodeQuery(key))
+	if rawQuery != "" {
+		srcUrl += "?" + rawQuery
+	}
+	srcUri, _ := url.Parse(srcUrl)
+	return srcUri.String()
+}
+
+// MakePrivateURL generates a download URL for a resource in a private bucket; note that it does not escape the key
+func MakePrivateURL(mac *auth.Credentials, domain, key string, deadline int64) (privateURL string) {
+	publicURL := MakePublicURL(domain, key)
+	urlToSign := publicURL
+	if strings.Contains(publicURL, "?") {
+		urlToSign = fmt.Sprintf("%s&e=%d", urlToSign, deadline)
+	} else {
+		urlToSign = fmt.Sprintf("%s?e=%d", urlToSign, deadline)
+	}
+	token := mac.Sign([]byte(urlToSign))
+	privateURL = fmt.Sprintf("%s&token=%s", urlToSign, token)
+	return
+}
+
+// MakePrivateURLv2 generates a download URL for a resource in a private bucket and guarantees that the key is escaped
+func MakePrivateURLv2(mac *auth.Credentials, domain, key string, deadline int64) (privateURL string) {
+	return MakePrivateURLv2WithQuery(mac, domain, key, nil, deadline)
+}
+
+// MakePrivateURLv2WithQuery generates a download URL for a resource in a private bucket, guarantees that the key is escaped, and appends the encoded query parameters to the URL
+func MakePrivateURLv2WithQuery(mac *auth.Credentials, domain, key string, query url.Values, deadline int64) (privateURL string) {
+	var rawQuery string
+	if query != nil {
+		rawQuery = query.Encode()
+	}
+	return makePrivateURLv2WithRawQuery(mac, domain, key, rawQuery, deadline)
+}
+
+// MakePrivateURLv2WithQueryString generates a download URL for a resource in a private bucket, guarantees that the key is escaped, and appends the given query string to the URL
+func MakePrivateURLv2WithQueryString(mac *auth.Credentials, domain, key, query string, deadline int64) (privateURL string) {
+	return makePrivateURLv2WithRawQuery(mac, domain, key, urlEncodeQuery(query), deadline)
+}
+
+func makePrivateURLv2WithRawQuery(mac *auth.Credentials, domain, key, rawQuery string, deadline int64) (privateURL string) {
+	publicURL := makePublicURLv2WithRawQuery(domain, key, rawQuery)
+	urlToSign := publicURL
+	if strings.Contains(publicURL, "?") {
+		urlToSign = fmt.Sprintf("%s&e=%d", urlToSign, deadline)
+	} else {
+		urlToSign = fmt.Sprintf("%s?e=%d", urlToSign, deadline)
+	}
+	token := mac.Sign([]byte(urlToSign))
+	privateURL = fmt.Sprintf("%s&token=%s", urlToSign, token)
+	return
+}
+
+func urlEncodeQuery(str string) (ret string) {
+	str = url.QueryEscape(str)
+	str = strings.Replace(str, "%2F", "/", -1)
+	str = strings.Replace(str, "%7C", "|", -1)
+	str = strings.Replace(str, "+", "%20", -1)
+	return str
+}
+
+type listFilesRet2 struct {
+	Marker string   `json:"marker"`
+	Item   ListItem `json:"item"`
+	Dir    string   `json:"dir"`
+}
+
+type listFilesRet struct {
+	Marker         string     `json:"marker"`
+	Items          []ListItem `json:"items"`
+	CommonPrefixes []string   `json:"commonPrefixes"`
+}
+
+// ListItem is an entry returned by file listing
+type ListItem struct {
+	Key      string `json:"key"`
+	Hash     string `json:"hash"`
+	Fsize    int64  `json:"fsize"`
+	PutTime  int64  `json:"putTime"`
+	MimeType string `json:"mimeType"`
+	Type     int    `json:"type"`
+	EndUser  string `json:"endUser"`
+}
+
+// The API may return empty records
+func (l *ListItem) IsEmpty() (empty bool) {
+	return l.Key == "" && l.Hash == "" && l.Fsize == 0 && l.PutTime == 0
+}
+
+func (l *ListItem) String() string {
+	str := ""
+	str += fmt.Sprintf("Hash:     %s\n", l.Hash)
+	str += fmt.Sprintf("Fsize:    %d\n", l.Fsize)
+	str += fmt.Sprintf("PutTime:  %d\n", l.PutTime)
+	str += fmt.Sprintf("MimeType: %s\n", l.MimeType)
+	str += fmt.Sprintf("Type:     %d\n", l.Type)
+	str += fmt.Sprintf("EndUser:  %s\n", l.EndUser)
+	return str
+}
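Private-bucket URLs append a deadline and a token derived from it; a sketch of a one-hour link, reusing the mac from the earlier sketch (time and fmt assumed imported):

```go
deadline := time.Now().Add(time.Hour).Unix() // the e=... expiry, in Unix seconds
u := storage.MakePrivateURL(mac, "http://cdn.example.com", "folder/pic.png", deadline)
fmt.Println(u)
```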
client.ResponseError(resp) + } + return callRetChan(ctx, resp) +} + +func callRetChan(ctx context.Context, resp *http.Response) (retCh chan listFilesRet2, err error) { + + retCh = make(chan listFilesRet2) + if resp.StatusCode/100 != 2 { + return nil, client.ResponseError(resp) + } + + go func() { + defer resp.Body.Close() + defer close(retCh) + + dec := json.NewDecoder(resp.Body) + var ret listFilesRet2 + + for { + err = dec.Decode(&ret) + if err != nil { + if err != io.EOF { + fmt.Fprintf(os.Stderr, "decode error: %v\n", err) + } + return + } + select { + case <-ctx.Done(): + return + case retCh <- ret: + } + } + }() + return +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/config.go b/vendor/github.com/qiniu/go-sdk/v7/storage/config.go new file mode 100644 index 0000000..43cd9f4 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/config.go @@ -0,0 +1,85 @@ +package storage + +// Config 为文件上传,资源管理等配置 +type Config struct { + //兼容保留 + Zone *Region //空间所在的存储区域 + + Region *Region + + // 如果设置的Host本身是以http://开头的,又设置了该字段为true,那么优先使用该字段,使用https协议 + // 同理如果该字段为false, 但是设置的host以https开头,那么使用http协议通信 + UseHTTPS bool //是否使用https域名 + UseCdnDomains bool //是否使用cdn加速域名 + CentralRsHost string //中心机房的RsHost,用于list bucket + + // 兼容保留 + RsHost string + RsfHost string + UpHost string + ApiHost string + IoHost string +} + +// reqHost 返回一个Host链接 +// 主要用于Config 中Host的获取,Region优先级最高, Zone次之, 最后才使用设置的Host信息 +// topHost是优先级最高的, host次之,如果都没有,使用默认的defaultHost +func reqHost(useHttps bool, topHost, host, defaultHost string) (endp string) { + if topHost != "" { + return topHost + } + if host == "" { + host = defaultHost + } + return endpoint(useHttps, host) +} + +// 获取RsHost +// 优先使用Zone中的Host信息,如果Zone中的host信息没有配置,那么使用Config中的Host信息 +func (c *Config) RsReqHost() string { + rzHost := c.hostFromRegion("rs") + return reqHost(c.UseHTTPS, rzHost, c.RsHost, DefaultRsHost) +} + +// GetRegion返回一个Region指针 +// 默认返回最新的Region, 如果该字段没有,那么返回兼容保留的Zone, 如果都为nil, 就返回nil +func (c *Config) GetRegion() *Region { + if c.Region != nil { + return c.Region + } + if c.Zone != nil { + return c.Zone + } + return nil +} + +func (c *Config) hostFromRegion(typ string) string { + region := c.GetRegion() + if region != nil { + switch typ { + case "rs": + return region.GetRsHost(c.UseHTTPS) + case "rsf": + return region.GetRsfHost(c.UseHTTPS) + case "api": + return region.GetApiHost(c.UseHTTPS) + case "io": + return region.GetIoHost(c.UseHTTPS) + } + } + return "" +} + +// 获取rsfHost +// 优先使用Zone中的Host信息,如果Zone中的host信息没有配置,那么使用Config中的Host信息 +func (c *Config) RsfReqHost() string { + rsHost := c.hostFromRegion("rsf") + return reqHost(c.UseHTTPS, rsHost, c.RsfHost, DefaultRsfHost) +} + +// 获取apiHost +// 优先使用Zone中的Host信息,如果Zone中的host信息没有配置,那么使用Config中的Host信息 +func (c *Config) ApiReqHost() string { + rzHost := c.hostFromRegion("api") + return reqHost(c.UseHTTPS, rzHost, c.ApiHost, DefaultAPIHost) +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/doc.go b/vendor/github.com/qiniu/go-sdk/v7/storage/doc.go new file mode 100644 index 0000000..13e3305 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/doc.go @@ -0,0 +1,10 @@ +// storage 包提供了资源的上传,管理,数据处理等功能。其中资源的上传又提供了表单上传的方式以及分片上传的方式,其中分片上传的方式还支持断点续传。 +// +// 该包中提供了 BucketManager 用来进行资源管理,比如获取文件信息,文件复制,删除,重命名等,以及很多高级功能如修改文件类型, +// 修改文件的生命周期,修改文件的存储类型等。 +// +// 该包中还提供了 FormUploader 和 ResumeUploader 来分别支持表单上传和分片上传,断点续传等功能,对于较大的文件,比如100MB以上的文件,一般 +// 建议采用分片上传的方式来保证上传的效率和可靠性。 +// +// 对于数据处理,则提供了 OperationManager,可以使用它来发送持久化数据处理请求,及查询数据处理的状态。 +package storage diff --git 
a/vendor/github.com/qiniu/go-sdk/v7/storage/errs.go b/vendor/github.com/qiniu/go-sdk/v7/storage/errs.go new file mode 100644 index 0000000..5014c65 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/errs.go @@ -0,0 +1,13 @@ +package storage + +import ( + "errors" +) + +var ( + // ErrBucketNotExist 用户存储空间不存在 + ErrBucketNotExist = errors.New("bucket not exist") + + // ErrNoSuchFile 文件不存在 + ErrNoSuchFile = errors.New("No such file or directory") +) diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/form_upload.go b/vendor/github.com/qiniu/go-sdk/v7/storage/form_upload.go new file mode 100644 index 0000000..6be69cd --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/form_upload.go @@ -0,0 +1,364 @@ +package storage + +import ( + "bytes" + "context" + "fmt" + "hash" + "hash/crc32" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + "os" + "path" + "path/filepath" + "strings" + + "github.com/qiniu/go-sdk/v7/client" +) + +// PutExtra 为表单上传的额外可选项 +type PutExtra struct { + // 可选,用户自定义参数,必须以 "x:" 开头。若不以x:开头,则忽略。 + Params map[string]string + + UpHost string + + // 可选,当为 "" 时候,服务端自动判断。 + MimeType string + + // 上传事件:进度通知。这个事件的回调函数应该尽可能快地结束。 + OnProgress func(fsize, uploaded int64) +} + +func (extra *PutExtra) getUpHost(useHttps bool) string { + return hostAddSchemeIfNeeded(useHttps, extra.UpHost) +} + +// PutRet 为七牛标准的上传回复内容。 +// 如果使用了上传回调或者自定义了returnBody,那么需要根据实际情况,自己自定义一个返回值结构体 +type PutRet struct { + Hash string `json:"hash"` + PersistentID string `json:"persistentId"` + Key string `json:"key"` +} + +// FormUploader 表示一个表单上传的对象 +type FormUploader struct { + Client *client.Client + Cfg *Config +} + +// NewFormUploader 用来构建一个表单上传的对象 +func NewFormUploader(cfg *Config) *FormUploader { + if cfg == nil { + cfg = &Config{} + } + + return &FormUploader{ + Client: &client.DefaultClient, + Cfg: cfg, + } +} + +// NewFormUploaderEx 用来构建一个表单上传的对象 +func NewFormUploaderEx(cfg *Config, clt *client.Client) *FormUploader { + if cfg == nil { + cfg = &Config{} + } + + if clt == nil { + clt = &client.DefaultClient + } + + return &FormUploader{ + Client: clt, + Cfg: cfg, + } +} + +// PutFile 用来以表单方式上传一个文件,和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.Reader 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 callbackUrl 或 returnBody,那么返回的数据结构是 PutRet 结构。 +// uptoken 是由业务服务器颁发的上传凭证。 +// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项,可以指定为nil。详细见 PutExtra 结构的描述。 +// +func (p *FormUploader) PutFile( + ctx context.Context, ret interface{}, uptoken, key, localFile string, extra *PutExtra) (err error) { + return p.putFile(ctx, ret, uptoken, key, true, localFile, extra) +} + +// PutFileWithoutKey 用来以表单方式上传一个文件。不指定文件上传后保存的key的情况下,文件命名方式首先看看 +// uptoken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// 和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.Reader 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// uptoken 是由业务服务器颁发的上传凭证。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项。可以指定为nil。详细见 PutExtra 结构的描述。 +// +func (p *FormUploader) PutFileWithoutKey( + ctx context.Context, ret interface{}, uptoken, localFile string, extra *PutExtra) (err error) { + return p.putFile(ctx, ret, uptoken, "", false, localFile, extra) +} + +func (p *FormUploader) putFile( + ctx context.Context, ret interface{}, uptoken string, + key string, hasKey bool, localFile string, extra *PutExtra) (err error) { + + f, err := os.Open(localFile) + if err != nil {
+ return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return + } + fsize := fi.Size() + + if extra == nil { + extra = &PutExtra{} + } + + return p.put(ctx, ret, uptoken, key, hasKey, f, fsize, extra, filepath.Base(localFile)) +} + +// Put 用来以表单方式上传一个文件。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 callbackUrl 或 returnBody,那么返回的数据结构是 PutRet 结构。 +// uptoken 是由业务服务器颁发的上传凭证。 +// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// data 是文件内容的访问接口(io.Reader)。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。可以指定为nil。详细见 PutExtra 结构的描述。 +// +func (p *FormUploader) Put( + ctx context.Context, ret interface{}, uptoken, key string, data io.Reader, size int64, extra *PutExtra) (err error) { + err = p.put(ctx, ret, uptoken, key, true, data, size, extra, path.Base(key)) + return +} + +// PutWithoutKey 用来以表单方式上传一个文件。不指定文件上传后保存的key的情况下,文件命名方式首先看看 uptoken 中是否设置了 saveKey, +// 如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// uptoken 是由业务服务器颁发的上传凭证。 +// data 是文件内容的访问接口(io.Reader)。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。详细见 PutExtra 结构的描述。 +// +func (p *FormUploader) PutWithoutKey( + ctx context.Context, ret interface{}, uptoken string, data io.Reader, size int64, extra *PutExtra) (err error) { + err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "filename") + return err +} + +func (p *FormUploader) put( + ctx context.Context, ret interface{}, uptoken string, + key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) (err error) { + + var upHost string + if extra == nil { + extra = &PutExtra{} + } + if extra.UpHost != "" { + upHost = extra.getUpHost(p.Cfg.UseHTTPS) + } else if upHost, err = p.getUpHostFromUploadToken(uptoken); err != nil { + return + } + + b := new(bytes.Buffer) + writer := multipart.NewWriter(b) + err = writeMultipart(writer, uptoken, key, hasKey, extra, fileName) + if err != nil { + return + } + + var dataReader io.Reader + h := crc32.NewIEEE() + dataReader = io.TeeReader(data, h) + crcReader := newCrc32Reader(writer.Boundary(), h) + h = nil + //write file + head := make(textproto.MIMEHeader) + head.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, + escapeQuotes(fileName))) + if extra.MimeType != "" { + head.Set("Content-Type", extra.MimeType) + } + + _, err = writer.CreatePart(head) + if err != nil { + return + } + head = nil + + lastLine := fmt.Sprintf("\r\n--%s--\r\n", writer.Boundary()) + r := strings.NewReader(lastLine) + + bodyLen := int64(-1) + if size >= 0 { + bodyLen = int64(b.Len()) + size + int64(len(lastLine)) + bodyLen += crcReader.length() + } + + mr := io.MultiReader(b, dataReader, crcReader, r) + b = nil + dataReader = nil + crcReader = nil + r = nil + + formBytes, err := ioutil.ReadAll(mr) + if err != nil { + return + } + mr = nil + + getBodyReader := func() (io.Reader, error) { + var formReader io.Reader = bytes.NewReader(formBytes) + if extra.OnProgress != nil { + formReader = &readerWithProgress{reader: formReader, fsize: size, onProgress: extra.OnProgress} + } + return formReader, nil + } + getBodyReadCloser := func() (io.ReadCloser, error) { + reader, err := getBodyReader() + if err != nil { + return nil, err + } + return ioutil.NopCloser(reader), nil + } + bodyReader, err := getBodyReader() + if err != nil { + return + } + + contentType := writer.FormDataContentType() + headers := http.Header{} + headers.Add("Content-Type", 
contentType) + err = p.Client.CallWithBodyGetter(ctx, ret, "POST", upHost, headers, bodyReader, getBodyReadCloser, bodyLen) + if err != nil { + return + } + if extra.OnProgress != nil { + extra.OnProgress(size, size) + } + + return +} + +func (p *FormUploader) getUpHostFromUploadToken(upToken string) (upHost string, err error) { + var ak, bucket string + + if ak, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + return + } + upHost, err = p.UpHost(ak, bucket) + return +} + +type crc32Reader struct { + h hash.Hash32 + boundary string + r io.Reader + inited bool + nlDashBoundaryNl string + header string + crc32PadLen int64 +} + +func newCrc32Reader(boundary string, h hash.Hash32) *crc32Reader { + nlDashBoundaryNl := fmt.Sprintf("\r\n--%s\r\n", boundary) + header := `Content-Disposition: form-data; name="crc32"` + "\r\n\r\n" + return &crc32Reader{ + h: h, + boundary: boundary, + nlDashBoundaryNl: nlDashBoundaryNl, + header: header, + crc32PadLen: 10, + } +} + +func (r *crc32Reader) Read(p []byte) (int, error) { + if !r.inited { + crc32Sum := r.h.Sum32() + crc32Line := r.nlDashBoundaryNl + r.header + fmt.Sprintf("%010d", crc32Sum) //padding crc32 results to 10 digits + r.r = strings.NewReader(crc32Line) + r.inited = true + } + return r.r.Read(p) +} + +func (r crc32Reader) length() (length int64) { + return int64(len(r.nlDashBoundaryNl+r.header)) + r.crc32PadLen +} + +func (p *FormUploader) UpHost(ak, bucket string) (upHost string, err error) { + return getUpHost(p.Cfg, ak, bucket) +} + +type readerWithProgress struct { + reader io.Reader + uploaded int64 + fsize int64 + onProgress func(fsize, uploaded int64) +} + +func (p *readerWithProgress) Read(b []byte) (n int, err error) { + if p.uploaded > 0 { + p.onProgress(p.fsize, p.uploaded) + } + + n, err = p.reader.Read(b) + p.uploaded += int64(n) + if p.uploaded > p.fsize { + p.uploaded = p.fsize + } + return +} + +func writeMultipart(writer *multipart.Writer, uptoken, key string, hasKey bool, + extra *PutExtra, fileName string) (err error) { + + //token + if err = writer.WriteField("token", uptoken); err != nil { + return + } + + //key + if hasKey { + if err = writer.WriteField("key", key); err != nil { + return + } + } + + //extra.Params + if extra.Params != nil { + for k, v := range extra.Params { + if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { + err = writer.WriteField(k, v) + if err != nil { + return + } + } + } + } + + return err +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/pfop.go b/vendor/github.com/qiniu/go-sdk/v7/storage/pfop.go new file mode 100644 index 0000000..9ad3100 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/pfop.go @@ -0,0 +1,212 @@ +package storage + +import ( + "context" + "fmt" + "github.com/qiniu/go-sdk/v7/auth" + "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/conf" + "net/http" +) + +// OperationManager 提供了数据处理相关的方法 +type OperationManager struct { + Client *client.Client + Mac *auth.Credentials + Cfg *Config +} + +// NewOperationManager 用来构建一个新的数据处理对象 +func NewOperationManager(mac *auth.Credentials, cfg *Config) *OperationManager { + if cfg == nil { + cfg = &Config{} + } + + return &OperationManager{ + Client: &client.DefaultClient, + Mac: mac, + Cfg: cfg, + } +} + +// NewOperationManager 用来构建一个新的数据处理对象 +func NewOperationManagerEx(mac *auth.Credentials, cfg *Config, clt 
*client.Client) *OperationManager { + if cfg == nil { + cfg = &Config{} + } + + if clt == nil { + clt = &client.DefaultClient + } + + return &OperationManager{ + Client: clt, + Mac: mac, + Cfg: cfg, + } +} + +// PfopRet 为数据处理请求的回复内容 +type PfopRet struct { + PersistentID string `json:"persistentId,omitempty"` +} + +// PrefopRet 为数据处理请求的状态查询回复内容 +type PrefopRet struct { + ID string `json:"id"` + Code int `json:"code"` + Desc string `json:"desc"` + InputBucket string `json:"inputBucket,omitempty"` + InputKey string `json:"inputKey,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + Reqid string `json:"reqid,omitempty"` + Items []FopResult +} + +func (r *PrefopRet) String() string { + strData := fmt.Sprintf("Id: %s\r\nCode: %d\r\nDesc: %s\r\n", r.ID, r.Code, r.Desc) + if r.InputBucket != "" { + strData += fmt.Sprintln(fmt.Sprintf("InputBucket: %s", r.InputBucket)) + } + if r.InputKey != "" { + strData += fmt.Sprintln(fmt.Sprintf("InputKey: %s", r.InputKey)) + } + if r.Pipeline != "" { + strData += fmt.Sprintln(fmt.Sprintf("Pipeline: %s", r.Pipeline)) + } + if r.Reqid != "" { + strData += fmt.Sprintln(fmt.Sprintf("Reqid: %s", r.Reqid)) + } + + strData = fmt.Sprintln(strData) + for _, item := range r.Items { + strData += fmt.Sprintf("\tCmd:\t%s\r\n\tCode:\t%d\r\n\tDesc:\t%s\r\n", item.Cmd, item.Code, item.Desc) + if item.Error != "" { + strData += fmt.Sprintf("\tError:\t%s\r\n", item.Error) + } else { + if item.Hash != "" { + strData += fmt.Sprintf("\tHash:\t%s\r\n", item.Hash) + } + if item.Key != "" { + strData += fmt.Sprintf("\tKey:\t%s\r\n", item.Key) + } + if item.Keys != nil { + if len(item.Keys) > 0 { + strData += "\tKeys: {\r\n" + for _, key := range item.Keys { + strData += fmt.Sprintf("\t\t%s\r\n", key) + } + strData += "\t}\r\n" + } + } + } + strData += "\r\n" + } + return strData +} + +// FopResult 云处理操作列表,包含每个云处理操作的状态信息 +type FopResult struct { + Cmd string `json:"cmd"` + Code int `json:"code"` + Desc string `json:"desc"` + Error string `json:"error,omitempty"` + Hash string `json:"hash,omitempty"` + Key string `json:"key,omitempty"` + Keys []string `json:"keys,omitempty"` +} + +// Pfop 持久化数据处理 +// +// bucket 资源空间 +// key 源资源名 +// fops 云处理操作列表, +// notifyURL 处理结果通知接收URL +// pipeline 多媒体处理队列名称 +// force 强制执行数据处理 +// +func (m *OperationManager) Pfop(bucket, key, fops, pipeline, notifyURL string, + force bool) (persistentID string, err error) { + pfopParams := map[string][]string{ + "bucket": {bucket}, + "key": {key}, + "fops": {fops}, + } + + if pipeline != "" { + pfopParams["pipeline"] = []string{pipeline} + } + + if notifyURL != "" { + pfopParams["notifyURL"] = []string{notifyURL} + } + + if force { + pfopParams["force"] = []string{"1"} + } + var ret PfopRet + ctx := auth.WithCredentials(context.TODO(), m.Mac) + reqHost, reqErr := m.ApiHost(bucket) + if reqErr != nil { + err = reqErr + return + } + reqURL := fmt.Sprintf("%s/pfop/", reqHost) + headers := http.Header{} + headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) + err = m.Client.CallWithForm(ctx, &ret, "POST", reqURL, headers, pfopParams) + if err != nil { + return + } + + persistentID = ret.PersistentID + return +} + +// Prefop 持久化处理状态查询 +func (m *OperationManager) Prefop(persistentID string) (ret PrefopRet, err error) { + ctx := context.TODO() + reqHost := m.PrefopApiHost(persistentID) + reqURL := fmt.Sprintf("%s/status/get/prefop?id=%s", reqHost, persistentID) + headers := http.Header{} + headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) + err = m.Client.Call(ctx, &ret, "GET", reqURL, headers) + return +} + 
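The two endpoints above form an asynchronous pair: Pfop only enqueues the job and hands back a persistentId, and completion is observed either by polling Prefop or via the notifyURL callback. A minimal editorial usage sketch of that flow follows (not part of the vendored file; the credentials, bucket, key and fops values are placeholders, and the status-code check 1 = pending / 2 = processing is an assumption about the service's conventions):

package main

import (
	"fmt"
	"time"

	"github.com/qiniu/go-sdk/v7/auth"
	"github.com/qiniu/go-sdk/v7/storage"
)

func main() {
	mac := auth.New("ACCESS_KEY", "SECRET_KEY")
	mgr := storage.NewOperationManager(mac, &storage.Config{UseHTTPS: true})

	// Enqueue a persistent operation (here: transcode to MP4) and keep its ID.
	pid, err := mgr.Pfop("my-bucket", "input.flv", "avthumb/mp4", "", "", false)
	if err != nil {
		panic(err)
	}

	// Poll until the job leaves the pending/processing states.
	for i := 0; i < 30; i++ {
		ret, qErr := mgr.Prefop(pid)
		if qErr != nil {
			panic(qErr)
		}
		fmt.Println(ret.String())
		if ret.Code != 1 && ret.Code != 2 { // assumed: 1 = pending, 2 = processing
			break
		}
		time.Sleep(2 * time.Second)
	}
}

Passing a non-empty notifyURL removes the need to poll: the service can deliver the processing result to that URL instead.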
+func (m *OperationManager) ApiHost(bucket string) (apiHost string, err error) { + var zone *Zone + if m.Cfg.Zone != nil { + zone = m.Cfg.Zone + } else { + if v, zoneErr := GetZone(m.Mac.AccessKey, bucket); zoneErr != nil { + err = zoneErr + return + } else { + zone = v + } + } + + scheme := "http://" + if m.Cfg.UseHTTPS { + scheme = "https://" + } + apiHost = fmt.Sprintf("%s%s", scheme, zone.ApiHost) + + return +} + +func (m *OperationManager) PrefopApiHost(persistentID string) (apiHost string) { + apiHost = "api.qiniu.com" + if m.Cfg.Zone != nil { + apiHost = m.Cfg.Zone.ApiHost + } + + if m.Cfg.UseHTTPS { + apiHost = fmt.Sprintf("https://%s", apiHost) + } else { + apiHost = fmt.Sprintf("http://%s", apiHost) + } + + return +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/recorder.go b/vendor/github.com/qiniu/go-sdk/v7/storage/recorder.go new file mode 100644 index 0000000..43ebfab --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/recorder.go @@ -0,0 +1,103 @@ +package storage + +import ( + "bytes" + "crypto/sha1" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +type Recorder interface { + // 新建或更新文件分片上传的进度 + Set(key string, data []byte) error + + // 获取文件分片上传的进度信息 + Get(key string) ([]byte, error) + + // 删除文件分片上传的进度文件 + Delete(key string) error + + // 根据给定的文件信息生成持久化纪录的 key + GenerateRecorderKey(keyInfos []string, sourceFileInfo os.FileInfo) string +} + +type FileRecorder struct { + directoryPath string +} + +func NewFileRecorder(directoryPath string) (fileRecorder *FileRecorder, err error) { + err = os.MkdirAll(directoryPath, 0700) + if err != nil { + return + } + fileRecorder = &FileRecorder{directoryPath: directoryPath} + return +} + +func (fileRecorder *FileRecorder) Set(key string, data []byte) error { + path := filepath.Join(fileRecorder.directoryPath, key) + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, bytes.NewReader(data)) + return err +} + +func (fileRecorder *FileRecorder) Get(key string) ([]byte, error) { + path := filepath.Join(fileRecorder.directoryPath, key) + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, err + } + if isRecorderFileOutOfDate(fileInfo) { + err = fileRecorder.Delete(key) + return nil, err + } + + buffer := new(bytes.Buffer) + _, err = io.Copy(buffer, file) + return buffer.Bytes(), err +} + +func (fileRecorder *FileRecorder) Delete(key string) error { + path := filepath.Join(fileRecorder.directoryPath, key) + return os.Remove(path) +} + +func (fileRecorder *FileRecorder) GenerateRecorderKey(keyInfos []string, sourceFileInfo os.FileInfo) string { + const delimiter = "*:|>?^ \b" + buffer := new(bytes.Buffer) + for _, keyInfo := range keyInfos { + buffer.WriteString(keyInfo) + buffer.WriteString(delimiter) + } + buffer.WriteString(sourceFileInfo.ModTime().String()) + return hashRecorderKey(buffer.Bytes()) +} + +func isRecorderFileOutOfDate(fileInfo os.FileInfo) bool { + return fileInfo.ModTime().Add(24 * 5 * time.Hour).Before(time.Now()) +} + +func hashRecorderKey(base []byte) string { + sha1edBase := sha1.Sum(base) + + var stringBuilder strings.Builder + for i := 0; i < len(sha1edBase); i++ { + b := (int64(sha1edBase[i]) & 0xff) + 0x100 + stringBuilder.WriteString(strconv.FormatInt(b, 16)[1:]) + } + return stringBuilder.String() +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/region.go 
b/vendor/github.com/qiniu/go-sdk/v7/storage/region.go new file mode 100644 index 0000000..6a21023 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/region.go @@ -0,0 +1,579 @@ +package storage + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/qiniu/go-sdk/v7/auth" + "github.com/qiniu/go-sdk/v7/client" + "golang.org/x/sync/singleflight" +) + +// 存储所在的地区,例如华东,华南,华北 +// 每个存储区域可能有多个机房信息,每个机房可能有多个上传入口 +type Region struct { + // 上传入口 + SrcUpHosts []string `json:"src_up,omitempty"` + + // 加速上传入口 + CdnUpHosts []string `json:"cdn_up,omitempty"` + + // 获取文件信息入口 + RsHost string `json:"rs,omitempty"` + + // bucket列举入口 + RsfHost string `json:"rsf,omitempty"` + + ApiHost string `json:"api,omitempty"` + + // 存储io 入口 + IovipHost string `json:"io,omitempty"` +} + +type RegionID string + +// GetRegionByID 根据RegionID获取对应的Region信息 +func GetRegionByID(regionID RegionID) (Region, bool) { + if r, ok := regionMap[regionID]; ok { + return r, ok + } + return Region{}, false +} + +func (r *Region) String() string { + str := "" + str += fmt.Sprintf("SrcUpHosts: %v\n", r.SrcUpHosts) + str += fmt.Sprintf("CdnUpHosts: %v\n", r.CdnUpHosts) + str += fmt.Sprintf("IovipHost: %s\n", r.IovipHost) + str += fmt.Sprintf("RsHost: %s\n", r.RsHost) + str += fmt.Sprintf("RsfHost: %s\n", r.RsfHost) + str += fmt.Sprintf("ApiHost: %s\n", r.ApiHost) + return str +} + +func endpoint(useHttps bool, host string) string { + host = strings.TrimSpace(host) + host = strings.TrimPrefix(host, "http://") + host = strings.TrimPrefix(host, "https://") + if host == "" { + return "" + } + scheme := "http://" + if useHttps { + scheme = "https://" + } + return fmt.Sprintf("%s%s", scheme, host) +} + +// 获取rsfHost +func (r *Region) GetRsfHost(useHttps bool) string { + return endpoint(useHttps, r.RsfHost) +} + +// 获取io host +func (r *Region) GetIoHost(useHttps bool) string { + return endpoint(useHttps, r.IovipHost) +} + +// 获取RsHost +func (r *Region) GetRsHost(useHttps bool) string { + return endpoint(useHttps, r.RsHost) +} + +// 获取api host +func (r *Region) GetApiHost(useHttps bool) string { + return endpoint(useHttps, r.ApiHost) +} + +var ( + // regionHuadong 表示华东机房 + regionHuadong = Region{ + SrcUpHosts: []string{ + "up.qiniup.com", + "up-nb.qiniup.com", + "up-xs.qiniup.com", + }, + CdnUpHosts: []string{ + "upload.qiniup.com", + "upload-nb.qiniup.com", + "upload-xs.qiniup.com", + }, + RsHost: "rs.qbox.me", + RsfHost: "rsf.qbox.me", + ApiHost: "api.qiniu.com", + IovipHost: "iovip.qbox.me", + } + + // regionHuadongZhejiang2 表示华东-浙江2 + regionHuadongZhejiang2 = Region{ + SrcUpHosts: []string{ + "up-cn-east-2.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-cn-east-2.qiniup.com", + }, + RsHost: "rs-cn-east-2.qiniuapi.com", + RsfHost: "rsf-cn-east-2.qiniuapi.com", + ApiHost: "api-cn-east-2.qiniuapi.com", + IovipHost: "iovip-cn-east-2.qiniuio.com", + } + + // regionHuabei 表示华北机房 + regionHuabei = Region{ + SrcUpHosts: []string{ + "up-z1.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-z1.qiniup.com", + }, + RsHost: "rs-z1.qbox.me", + RsfHost: "rsf-z1.qbox.me", + ApiHost: "api-z1.qiniuapi.com", + IovipHost: "iovip-z1.qbox.me", + } + // regionHuanan 表示华南机房 + regionHuanan = Region{ + SrcUpHosts: []string{ + "up-z2.qiniup.com", + "up-gz.qiniup.com", + "up-fs.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-z2.qiniup.com", + "upload-gz.qiniup.com", + "upload-fs.qiniup.com", + }, + RsHost: "rs-z2.qbox.me", + RsfHost: "rsf-z2.qbox.me", + ApiHost: "api-z2.qiniuapi.com", +
IovipHost: "iovip-z2.qbox.me", + } + + // regionNorthAmerica 表示北美机房 + regionNorthAmerica = Region{ + SrcUpHosts: []string{ + "up-na0.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-na0.qiniup.com", + }, + RsHost: "rs-na0.qbox.me", + RsfHost: "rsf-na0.qbox.me", + ApiHost: "api-na0.qiniuapi.com", + IovipHost: "iovip-na0.qbox.me", + } + // regionSingapore 表示新加坡机房 + regionSingapore = Region{ + SrcUpHosts: []string{ + "up-as0.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-as0.qiniup.com", + }, + RsHost: "rs-as0.qbox.me", + RsfHost: "rsf-as0.qbox.me", + ApiHost: "api-as0.qiniuapi.com", + IovipHost: "iovip-as0.qbox.me", + } + // regionFogCnEast1 表示雾存储华东区 + regionFogCnEast1 = Region{ + SrcUpHosts: []string{ + "up-fog-cn-east-1.qiniup.com", + }, + CdnUpHosts: []string{ + "upload-fog-cn-east-1.qiniup.com", + }, + RsHost: "rs-fog-cn-east-1.qbox.me", + RsfHost: "rsf-fog-cn-east-1.qbox.me", + ApiHost: "api-fog-cn-east-1.qiniuapi.com", + IovipHost: "iovip-fog-cn-east-1.qbox.me", + } +) + +const ( + // region code + RIDHuadong = RegionID("z0") + RIDHuadongZheJiang2 = RegionID("cn-east-2") + RIDHuabei = RegionID("z1") + RIDHuanan = RegionID("z2") + RIDNorthAmerica = RegionID("na0") + RIDSingapore = RegionID("as0") + RIDFogCnEast1 = RegionID("fog-cn-east-1") +) + +// regionMap 是RegionID到具体的Region的映射 +var regionMap = map[RegionID]Region{ + RIDHuadong: regionHuadong, + RIDHuadongZheJiang2: regionHuadongZhejiang2, + RIDHuanan: regionHuanan, + RIDHuabei: regionHuabei, + RIDSingapore: regionSingapore, + RIDNorthAmerica: regionNorthAmerica, + RIDFogCnEast1: regionFogCnEast1, +} + +/// UcHost 为查询空间相关域名的API服务地址 +/// 设置 UcHost 时,如果不指定 scheme 默认会使用 https +/// UcHost 已废弃,建议使用 SetUcHost +//Deprecated +var UcHost = "https://uc.qbox.me" + +var ucHost = "" + +func SetUcHost(host string, useHttps bool) { + ucHost = endpoint(useHttps, host) +} + +func getUcHostByDefaultProtocol() string { + return getUcHost(true) +} + +func getUcHost(useHttps bool) string { + if ucHost != "" { + return ucHost + } + + if strings.Contains(UcHost, "://") { + return UcHost + } else { + return endpoint(useHttps, UcHost) + } +} + +// UcQueryRet 为查询请求的回复 +type UcQueryRet struct { + TTL int `json:"ttl"` + Io map[string]map[string][]string + IoInfo map[string]UcQueryIo `json:"io"` + Up map[string]UcQueryUp `json:"up"` +} + +func (uc *UcQueryRet) UnmarshalJSON(data []byte) error { + t := struct { + TTL int `json:"ttl"` + IoInfo map[string]UcQueryIo `json:"io"` + Up map[string]UcQueryUp `json:"up"` + }{} + if err := json.Unmarshal(data, &t); err != nil { + return err + } + + uc.TTL = t.TTL + uc.IoInfo = t.IoInfo + uc.Up = t.Up + uc.setup() + return nil +} + +func (uc *UcQueryRet) setup() { + if uc.Io != nil || uc.IoInfo == nil { + return + } + + uc.Io = make(map[string]map[string][]string) + ioSrc := uc.IoInfo["src"].toMapWithoutInfo() + if ioSrc != nil && len(ioSrc) > 0 { + uc.Io["src"] = ioSrc + } + + ioOldSrc := uc.IoInfo["old_src"].toMapWithoutInfo() + if ioOldSrc != nil && len(ioOldSrc) > 0 { + uc.Io["old_src"] = ioOldSrc + } +} + +// UcQueryUp 为查询请求回复中的上传域名信息 +type UcQueryUp struct { + Main []string `json:"main,omitempty"` + Backup []string `json:"backup,omitempty"` + Info string `json:"info,omitempty"` +} + +// UcQueryIo 为查询请求回复中的上传域名信息 +type UcQueryIo struct { + Main []string `json:"main,omitempty"` + Backup []string `json:"backup,omitempty"` + Info string `json:"info,omitempty"` +} + +func (io UcQueryIo) toMapWithoutInfo() map[string][]string { + + ret := make(map[string][]string) + if io.Main != nil && len(io.Main) > 0 { + 
ret["main"] = io.Main + } + + if io.Backup != nil && len(io.Backup) > 0 { + ret["backup"] = io.Backup + } + + return ret +} + +type regionCacheValue struct { + Region *Region `json:"region"` + Deadline time.Time `json:"deadline"` +} + +type regionCacheMap map[string]regionCacheValue + +var ( + regionCachePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", "query.cache.json") + regionCache sync.Map + regionCacheLock sync.RWMutex + regionCacheSyncLock sync.Mutex + regionCacheGroup singleflight.Group + regionCacheLoaded bool = false +) + +func SetRegionCachePath(newPath string) { + regionCacheLock.Lock() + defer regionCacheLock.Unlock() + + regionCachePath = newPath + regionCacheLoaded = false +} + +func loadRegionCache() { + cacheFile, err := os.Open(regionCachePath) + if err != nil { + return + } + defer cacheFile.Close() + + var cacheMap regionCacheMap + if err = json.NewDecoder(cacheFile).Decode(&cacheMap); err != nil { + return + } + for cacheKey, cacheValue := range cacheMap { + regionCache.Store(cacheKey, cacheValue) + } +} + +func storeRegionCache() { + err := os.MkdirAll(filepath.Dir(regionCachePath), 0700) + if err != nil { + return + } + + cacheFile, err := os.OpenFile(regionCachePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return + } + defer cacheFile.Close() + + cacheMap := make(regionCacheMap) + regionCache.Range(func(cacheKey, cacheValue interface{}) bool { + cacheMap[cacheKey.(string)] = cacheValue.(regionCacheValue) + return true + }) + if err = json.NewEncoder(cacheFile).Encode(cacheMap); err != nil { + return + } +} + +// GetRegion 用来根据ak和bucket来获取空间相关的机房信息 +func GetRegion(ak, bucket string) (*Region, error) { + regionCacheLock.RLock() + if regionCacheLoaded { + regionCacheLock.RUnlock() + } else { + regionCacheLock.RUnlock() + func() { + regionCacheLock.Lock() + defer regionCacheLock.Unlock() + + if !regionCacheLoaded { + loadRegionCache() + regionCacheLoaded = true + } + }() + } + + regionID := fmt.Sprintf("%s:%s", ak, bucket) + //check from cache + if v, ok := regionCache.Load(regionID); ok && time.Now().Before(v.(regionCacheValue).Deadline) { + return v.(regionCacheValue).Region, nil + } + + newRegion, err, _ := regionCacheGroup.Do(regionID, func() (interface{}, error) { + reqURL := fmt.Sprintf("%s/v2/query?ak=%s&bucket=%s", getUcHostByDefaultProtocol(), ak, bucket) + + var ret UcQueryRet + err := client.DefaultClient.CallWithForm(context.Background(), &ret, "GET", reqURL, nil, nil) + if err != nil { + return nil, fmt.Errorf("query region error, %s", err.Error()) + } + + if len(ret.Io["src"]["main"]) <= 0 { + return nil, fmt.Errorf("empty io host list") + } + + ioHost := ret.Io["src"]["main"][0] + srcUpHosts := ret.Up["src"].Main + if ret.Up["src"].Backup != nil { + srcUpHosts = append(srcUpHosts, ret.Up["src"].Backup...) + } + cdnUpHosts := ret.Up["acc"].Main + if ret.Up["acc"].Backup != nil { + cdnUpHosts = append(cdnUpHosts, ret.Up["acc"].Backup...) 
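+ // The v2/query reply only carries up/io host lists; the Region assembled below therefore starts from the package default rs/rsf/api hosts, which setSpecificHosts then overrides for known zones.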
+ } + + region := &Region{ + SrcUpHosts: srcUpHosts, + CdnUpHosts: cdnUpHosts, + IovipHost: ioHost, + RsHost: DefaultRsHost, + RsfHost: DefaultRsfHost, + ApiHost: DefaultAPIHost, + } + + //set specific hosts if possible + setSpecificHosts(ioHost, region) + regionCache.Store(regionID, regionCacheValue{ + Region: region, + Deadline: time.Now().Add(time.Duration(ret.TTL) * time.Second), + }) + + regionCacheSyncLock.Lock() + defer regionCacheSyncLock.Unlock() + + storeRegionCache() + return region, nil + }) + + if err != nil { + return nil, err + } else { + return newRegion.(*Region), nil + } +} + +type ucRegionsRet struct { + Regions []ucRegionRet `json:"regions"` +} + +type ucRegionRet struct { + Id string `json:"id"` + Up ucDomainsRet `json:"up"` + Io ucDomainsRet `json:"io"` + Uc ucDomainsRet `json:"uc"` + Rs ucDomainsRet `json:"rs"` + Rsf ucDomainsRet `json:"rsf"` + Api ucDomainsRet `json:"api"` +} + +type ucDomainsRet struct { + Main []string `json:"domains"` + Backup []string `json:"old,omitempty"` +} + +var ( + regionIdCache = make(map[string]*Region) + regionIdCacheGroup singleflight.Group +) + +// getRegionByRegionId 用来根据 ak 和 sk 来获取空间相关的机房信息,由于返回的 Region 结构体与先前不兼容,所以本接口暂不开放 +func getRegionByRegionId(regionId string, credentials *auth.Credentials) (region *Region, err error) { + var cacheValue interface{} + + cacheValue, err, _ = regionIdCacheGroup.Do("query", func() (interface{}, error) { + if v, ok := regionIdCache[regionId]; ok { + return v, nil + } + reqURL := fmt.Sprintf("%s/regions", getUcHostByDefaultProtocol()) + var ret ucRegionsRet + ctx := context.TODO() + qErr := client.DefaultClient.CredentialedCallWithForm(ctx, credentials, auth.TokenQiniu, &ret, "GET", reqURL, nil, nil) + if qErr != nil { + return nil, fmt.Errorf("query region error, %s", qErr.Error()) + } + for _, r := range ret.Regions { + upHosts := r.Up.Main + if len(r.Up.Backup) > 0 { + upHosts = append(upHosts, r.Up.Backup...) 
+ } + if len(r.Io.Main) == 0 { + return nil, fmt.Errorf("empty io host list") + } + if len(r.Rs.Main) == 0 { + return nil, fmt.Errorf("empty rs host list") + } + if len(r.Rsf.Main) == 0 { + return nil, fmt.Errorf("empty rsf host list") + } + if len(r.Api.Main) == 0 { + return nil, fmt.Errorf("empty api host list") + } + + region := &Region{ + SrcUpHosts: upHosts, + CdnUpHosts: upHosts, + IovipHost: r.Io.Main[0], + RsHost: r.Rs.Main[0], + RsfHost: r.Rsf.Main[0], + ApiHost: r.Api.Main[0], + } + regionIdCache[r.Id] = region + } + if v, ok := regionIdCache[regionId]; ok { + return v, nil + } else { + return nil, fmt.Errorf("region id is not found") + } + }) + if err != nil { + return nil, err + } + region = cacheValue.(*Region) + return +} + +func regionFromHost(ioHost string) (Region, bool) { + if strings.Contains(ioHost, "-z1") { + return GetRegionByID(RIDHuabei) + } + if strings.Contains(ioHost, "-z2") { + return GetRegionByID(RIDHuanan) + } + + if strings.Contains(ioHost, "-na0") { + return GetRegionByID(RIDNorthAmerica) + } + if strings.Contains(ioHost, "-as0") { + return GetRegionByID(RIDSingapore) + } + return Region{}, false +} + +func setSpecificHosts(ioHost string, region *Region) { + r, ok := regionFromHost(ioHost) + if ok { + region.RsHost = r.RsHost + region.RsfHost = r.RsfHost + region.ApiHost = r.ApiHost + } +} + +type RegionInfo struct { + ID string `json:"id"` + Description string `json:"description"` +} + +func GetRegionsInfo(mac *auth.Credentials) ([]RegionInfo, error) { + var regions struct { + Regions []RegionInfo `json:"regions"` + } + qErr := client.DefaultClient.CredentialedCallWithForm(context.Background(), mac, auth.TokenQiniu, ®ions, "GET", getUcHostByDefaultProtocol()+"/regions", nil, nil) + if qErr != nil { + return nil, fmt.Errorf("query region error, %s", qErr.Error()) + } else { + return regions.Regions, nil + } +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader.go b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader.go new file mode 100644 index 0000000..49a0c7e --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader.go @@ -0,0 +1,437 @@ +package storage + +import ( + "bytes" + "context" + "encoding/json" + "hash/crc32" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" + + "github.com/qiniu/go-sdk/v7/client" +) + +// ResumeUploader 表示一个分片上传的对象 +type ResumeUploader struct { + Client *client.Client + Cfg *Config +} + +// NewResumeUploader 表示构建一个新的分片上传的对象 +func NewResumeUploader(cfg *Config) *ResumeUploader { + return NewResumeUploaderEx(cfg, nil) +} + +// NewResumeUploaderEx 表示构建一个新的分片上传的对象 +func NewResumeUploaderEx(cfg *Config, clt *client.Client) *ResumeUploader { + if cfg == nil { + cfg = &Config{} + } + + if clt == nil { + clt = &client.DefaultClient + } + + return &ResumeUploader{ + Client: clt, + Cfg: cfg, + } +} + +// Put 方法用来上传一个文件,支持断点续传和分块上传。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 接口,而不是 io.Reader。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 +// +func (p *ResumeUploader) Put(ctx context.Context, ret interface{}, upToken string, key string, f io.ReaderAt, fsize int64, extra *RputExtra) error { + return p.rput(ctx, ret, upToken, key, true, f, fsize, nil, extra) +} + +func (p *ResumeUploader) PutWithoutSize(ctx context.Context, ret interface{}, 
upToken, key string, r io.Reader, extra *RputExtra) error { + return p.rputWithoutSize(ctx, ret, upToken, key, true, r, extra) +} + +// PutWithoutKey 方法用来上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 +// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 接口,而不是 io.Reader。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 +// +func (p *ResumeUploader) PutWithoutKey(ctx context.Context, ret interface{}, upToken string, f io.ReaderAt, fsize int64, extra *RputExtra) error { + return p.rput(ctx, ret, upToken, "", false, f, fsize, nil, extra) +} + +// PutWithoutKeyAndSize 方法用来上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 +// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// f 是文件内容的访问接口。 +// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 +// +func (p *ResumeUploader) PutWithoutKeyAndSize(ctx context.Context, ret interface{}, upToken string, f io.Reader, extra *RputExtra) error { + return p.rputWithoutSize(ctx, ret, upToken, "", false, f, extra) +} + +// PutFile 用来上传一个文件,支持断点续传和分块上传。 +// 和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 +// +func (p *ResumeUploader) PutFile(ctx context.Context, ret interface{}, upToken, key, localFile string, extra *RputExtra) error { + return p.rputFile(ctx, ret, upToken, key, true, localFile, extra) +} + +// PutFileWithoutKey 上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 +// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// 和 PutWithoutKey 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 +// +func (p *ResumeUploader) PutFileWithoutKey(ctx context.Context, ret interface{}, upToken, localFile string, extra *RputExtra) error { + return p.rputFile(ctx, ret, upToken, "", false, localFile, extra) +} + +type fileDetailsInfo struct { + fileFullPath string + fileInfo os.FileInfo +} + +func (p *ResumeUploader) rput(ctx context.Context, ret interface{}, upToken string, key string, hasKey bool, f io.ReaderAt, fsize int64, fileDetails *fileDetailsInfo, extra *RputExtra) (err error) { + if extra == nil { + extra = &RputExtra{} + } + extra.init() + + var ( + upHost, accessKey, bucket, recorderKey string + fileInfo os.FileInfo = nil + ) + + if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + return + } + + if extra.UpHost != "" { + upHost = extra.getUpHost(p.Cfg.UseHTTPS) + } else { + upHost, err = p.resumeUploaderAPIs().upHost(accessKey, bucket) + if err != nil { + return + } + } + + if extra.Recorder != nil && fileDetails != nil { + recorderKey = extra.Recorder.GenerateRecorderKey( + []string{accessKey, bucket, key, "v1", fileDetails.fileFullPath, strconv.FormatInt(1< chunkSize { + chunkRange.Size = chunkSize + } + hasher := crc32.NewIEEE() + buffer.Reset() + realChunkSize, err = io.Copy(hasher, 
io.TeeReader(io.NewSectionReader(c.reader, chunkRange.From, chunkRange.Size), buffer)) + if err != nil { + impl.extra.NotifyErr(int(c.id), int(c.size), err) + return err + } else if realChunkSize == 0 { + break + } else { + totalChunkSize += realChunkSize + } + crc32Value := hasher.Sum32() + UploadSingleChunk: + for retried := 0; retried < impl.extra.TryTimes; retried += 1 { + if chunkOffset == 0 { + err = apis.mkBlk(ctx, impl.upToken, impl.upHost, &blkPutRet, c.size, buffer, realChunkSize) + } else { + err = apis.bput(ctx, impl.upToken, &blkPutRet, buffer, realChunkSize) + } + if err != nil { + if err == context.Canceled { + break UploadSingleChunk + } + continue UploadSingleChunk + } + if blkPutRet.Crc32 != crc32Value || int64(blkPutRet.Offset) != chunkOffset+realChunkSize { + err = ErrUnmatchedChecksum + continue UploadSingleChunk + } + break UploadSingleChunk + } + if err != nil { + impl.extra.NotifyErr(int(c.id), int(realChunkSize), err) + return err + } + } + + blkPutRet.blkIdx = int(c.id) + blkPutRet.fileOffset = c.offset + blkPutRet.chunkSize = int(totalChunkSize) + impl.extra.Notify(blkPutRet.blkIdx, int(totalChunkSize), &blkPutRet) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + func() { + impl.lock.Lock() + defer impl.lock.Unlock() + impl.extra.Progresses = append(impl.extra.Progresses, blkPutRet) + impl.fileSize += c.size + impl.save(ctx) + }() + + return nil +} + +func (impl *resumeUploaderImpl) final(ctx context.Context) error { + if impl.extra.Recorder != nil { + impl.extra.Recorder.Delete(impl.recorderKey) + } + + sort.Sort(blkputRets(impl.extra.Progresses)) + return impl.resumeUploaderAPIs().mkfile(ctx, impl.upToken, impl.upHost, impl.ret, impl.key, impl.hasKey, impl.fileSize, impl.extra) +} + +func (impl *resumeUploaderImpl) recover(ctx context.Context, recoverData []byte) (recovered []int64) { + var recoveryInfo resumeUploaderRecoveryInfo + if err := json.Unmarshal(recoverData, &recoveryInfo); err != nil { + return + } + if impl.fileInfo == nil || recoveryInfo.FileSize != impl.fileInfo.Size() || recoveryInfo.ModTimeStamp != impl.fileInfo.ModTime().UnixNano() { + return + } + + for _, c := range recoveryInfo.Contexts { + if time.Now().Before(time.Unix(c.ExpiredAt, 0)) { + impl.fileSize += int64(c.ChunkSize) + impl.extra.Progresses = append(impl.extra.Progresses, BlkputRet{ + blkIdx: c.Idx, fileOffset: c.Offset, chunkSize: c.ChunkSize, Ctx: c.Ctx, ExpiredAt: c.ExpiredAt, + }) + recovered = append(recovered, int64(c.Offset)) + } + } + + return +} + +func (impl *resumeUploaderImpl) save(ctx context.Context) { + var ( + recoveryInfo resumeUploaderRecoveryInfo + recoveredData []byte + err error + ) + + if impl.fileInfo == nil || impl.extra.Recorder == nil { + return + } + + recoveryInfo.FileSize = impl.fileInfo.Size() + recoveryInfo.ModTimeStamp = impl.fileInfo.ModTime().UnixNano() + recoveryInfo.Contexts = make([]resumeUploaderRecoveryInfoContext, 0, len(impl.extra.Progresses)) + + for _, progress := range impl.extra.Progresses { + recoveryInfo.Contexts = append(recoveryInfo.Contexts, resumeUploaderRecoveryInfoContext{ + Ctx: progress.Ctx, Idx: progress.blkIdx, Offset: progress.fileOffset, ChunkSize: progress.chunkSize, ExpiredAt: progress.ExpiredAt, + }) + } + + if recoveredData, err = json.Marshal(recoveryInfo); err != nil { + return + } + + err = impl.extra.Recorder.Set(impl.recorderKey, recoveredData) +} + +func (impl *resumeUploaderImpl) resumeUploaderAPIs() *resumeUploaderAPIs { + return &resumeUploaderAPIs{Client: impl.client, Cfg: impl.cfg} +} 
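That completes the v1 resumable uploader: input is cut into 4 MB blocks (blockBits = 22), blocks go through the shared worker pool, and a Recorder can persist per-block progress so an interrupted transfer picks up where it stopped. A minimal editorial usage sketch follows (not part of the vendored file; storage.PutPolicy and its UploadToken method are assumed from this package's upload-token helpers, and the credential, bucket and path values are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/qiniu/go-sdk/v7/auth"
	"github.com/qiniu/go-sdk/v7/storage"
)

func main() {
	mac := auth.New("ACCESS_KEY", "SECRET_KEY")
	putPolicy := storage.PutPolicy{Scope: "my-bucket"} // assumed helper from this package
	upToken := putPolicy.UploadToken(mac)

	uploader := storage.NewResumeUploader(&storage.Config{UseHTTPS: true})

	// Persist block progress under /tmp so a second run resumes the upload;
	// stale records are dropped after five days (see isRecorderFileOutOfDate above).
	recorder, err := storage.NewFileRecorder("/tmp/qiniu-progress")
	if err != nil {
		panic(err)
	}

	var ret storage.PutRet
	extra := storage.RputExtra{
		Recorder: recorder,
		Notify: func(blkIdx, blkSize int, r *storage.BlkputRet) {
			fmt.Printf("block %d done (%d bytes)\n", blkIdx, blkSize)
		},
	}
	err = uploader.PutFile(context.Background(), &ret, upToken, "backups/big.bin", "/path/to/big.bin", &extra)
	fmt.Println(ret.Key, ret.Hash, err)
}

ResumeUploaderV2 (defined further below) keeps the same call shape with RputV2Extra, where PartSize takes over the role of the fixed v1 block/chunk split.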
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_apis.go b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_apis.go new file mode 100644 index 0000000..916264b --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_apis.go @@ -0,0 +1,262 @@ +package storage + +import ( + "context" + "encoding/base64" + "io" + "net/http" + "strconv" + "strings" + + "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/conf" +) + +type resumeUploaderAPIs struct { + Client *client.Client + Cfg *Config +} + +// BlkputRet 表示分片上传每个片上传完毕的返回值 +type BlkputRet struct { + Ctx string `json:"ctx"` + Checksum string `json:"checksum"` + Crc32 uint32 `json:"crc32"` + Offset uint32 `json:"offset"` + Host string `json:"host"` + ExpiredAt int64 `json:"expired_at"` + chunkSize int + fileOffset int64 + blkIdx int +} + +func (p *resumeUploaderAPIs) mkBlk(ctx context.Context, upToken, upHost string, ret *BlkputRet, blockSize int64, body io.Reader, size int64) error { + reqUrl := upHost + "/mkblk/" + strconv.FormatInt(blockSize, 10) + + return p.Client.CallWith64(ctx, ret, "POST", reqUrl, makeHeadersForUpload(upToken), body, size) +} + +func (p *resumeUploaderAPIs) bput(ctx context.Context, upToken string, ret *BlkputRet, body io.Reader, size int64) error { + reqUrl := ret.Host + "/bput/" + ret.Ctx + "/" + strconv.FormatUint(uint64(ret.Offset), 10) + + return p.Client.CallWith64(ctx, ret, "POST", reqUrl, makeHeadersForUpload(upToken), body, size) +} + +// RputExtra 表示分片上传额外可以指定的参数 +type RputExtra struct { + Recorder Recorder // 可选。上传进度记录 + Params map[string]string // 可选。用户自定义参数,以"x:"开头,而且值不能为空,否则忽略 + UpHost string + MimeType string // 可选。 + ChunkSize int // 可选。每次上传的Chunk大小 + TryTimes int // 可选。尝试次数 + Progresses []BlkputRet // 可选。上传进度 + Notify func(blkIdx int, blkSize int, ret *BlkputRet) // 可选。进度提示(注意多个block是并行传输的) + NotifyErr func(blkIdx int, blkSize int, err error) +} + +func (extra *RputExtra) getUpHost(useHttps bool) string { + return hostAddSchemeIfNeeded(useHttps, extra.UpHost) +} + +func (p *resumeUploaderAPIs) mkfile(ctx context.Context, upToken, upHost string, ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra) (err error) { + url := upHost + "/mkfile/" + strconv.FormatInt(fsize, 10) + if extra == nil { + extra = &RputExtra{} + } + if extra.MimeType != "" { + url += "/mimeType/" + encode(extra.MimeType) + } + if hasKey { + url += "/key/" + encode(key) + } + for k, v := range extra.Params { + if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { + url += "/" + k + "/" + encode(v) + } + } + ctxs := make([]string, len(extra.Progresses)) + for i, progress := range extra.Progresses { + ctxs[i] = progress.Ctx + } + buf := strings.Join(ctxs, ",") + return p.Client.CallWith(ctx, ret, "POST", url, makeHeadersForUpload(upToken), strings.NewReader(buf), len(buf)) +} + +// InitPartsRet 表示分片上传 v2 初始化完毕的返回值 +type InitPartsRet struct { + UploadID string `json:"uploadId"` +} + +func (p *resumeUploaderAPIs) initParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, ret *InitPartsRet) error { + reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads" + + return p.Client.CallWith(ctx, ret, "POST", reqUrl, makeHeadersForUploadEx(upToken, ""), nil, 0) +} + +// UploadPartsRet 表示分片上传 v2 每个片上传完毕的返回值 +type UploadPartsRet struct { + Etag string `json:"etag"` + MD5 string `json:"md5"` +} + +func (p *resumeUploaderAPIs) uploadParts(ctx context.Context, upToken, upHost, bucket, 
key string, hasKey bool, uploadId string, partNumber int64, partMD5 string, ret *UploadPartsRet, body io.Reader, size int64) error { + reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads/" + uploadId + "/" + strconv.FormatInt(partNumber, 10) + + return p.Client.CallWith64(ctx, ret, "PUT", reqUrl, makeHeadersForUploadPart(upToken, partMD5), body, size) +} + +type UploadPartInfo struct { + Etag string `json:"etag"` + PartNumber int64 `json:"partNumber"` + partSize int + fileOffset int64 +} + +// RputV2Extra 表示分片上传 v2 额外可以指定的参数 +type RputV2Extra struct { + Recorder Recorder // 可选。上传进度记录 + Metadata map[string]string // 可选。用户自定义文件 metadata 信息 + CustomVars map[string]string // 可选。用户自定义参数,以"x:"开头,而且值不能为空,否则忽略 + UpHost string + MimeType string // 可选。 + PartSize int64 // 可选。每次上传的块大小 + TryTimes int // 可选。尝试次数 + Progresses []UploadPartInfo // 上传进度 + Notify func(partNumber int64, ret *UploadPartsRet) // 可选。进度提示(注意多个block是并行传输的) + NotifyErr func(partNumber int64, err error) +} + +func hostAddSchemeIfNeeded(useHttps bool, host string) string { + if host == "" { + return "" + } else if strings.Contains(host, "://") { + return host + } else { + return endpoint(useHttps, host) + } +} + +func (extra *RputV2Extra) getUpHost(useHttps bool) string { + return hostAddSchemeIfNeeded(useHttps, extra.UpHost) +} + +func (p *resumeUploaderAPIs) completeParts(ctx context.Context, upToken, upHost string, ret interface{}, bucket, key string, hasKey bool, uploadId string, extra *RputV2Extra) (err error) { + type CompletePartBody struct { + Parts []UploadPartInfo `json:"parts"` + MimeType string `json:"mimeType,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + CustomVars map[string]string `json:"customVars,omitempty"` + } + if extra == nil { + extra = &RputV2Extra{} + } + completePartBody := CompletePartBody{ + Parts: extra.Progresses, + MimeType: extra.MimeType, + Metadata: extra.Metadata, + CustomVars: make(map[string]string), + } + for k, v := range extra.CustomVars { + if strings.HasPrefix(k, "x:") && v != "" { + completePartBody.CustomVars[k] = v + } + } + + reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads/" + uploadId + + return p.Client.CallWithJson(ctx, ret, "POST", reqUrl, makeHeadersForUploadEx(upToken, conf.CONTENT_TYPE_JSON), &completePartBody) +} + +func (p *resumeUploaderAPIs) upHost(ak, bucket string) (upHost string, err error) { + return getUpHost(p.Cfg, ak, bucket) +} + +func makeHeadersForUpload(upToken string) http.Header { + return makeHeadersForUploadEx(upToken, conf.CONTENT_TYPE_OCTET) +} + +func makeHeadersForUploadPart(upToken, partMD5 string) http.Header { + headers := makeHeadersForUpload(upToken) + headers.Add("Content-MD5", partMD5) + return headers +} + +func makeHeadersForUploadEx(upToken, contentType string) http.Header { + headers := http.Header{} + if contentType != "" { + headers.Add("Content-Type", contentType) + } + headers.Add("Authorization", "UpToken "+upToken) + return headers +} + +func encode(raw string) string { + return base64.URLEncoding.EncodeToString([]byte(raw)) +} + +func encodeV2(key string, hasKey bool) string { + if !hasKey { + return "~" + } else { + return encode(key) + } +} + +func (r *RputExtra) init() { + if r.ChunkSize == 0 { + r.ChunkSize = settings.ChunkSize + } + if r.TryTimes == 0 { + r.TryTimes = settings.TryTimes + } + if r.Notify == nil { + r.Notify = func(blkIdx, blkSize int, ret *BlkputRet) {} + } + if r.NotifyErr == nil { + r.NotifyErr = func(blkIdx, blkSize
int, err error) {} + } +} + +func (r *RputV2Extra) init() { + if r.PartSize == 0 { + r.PartSize = settings.PartSize + } + if r.TryTimes == 0 { + r.TryTimes = settings.TryTimes + } + if r.Notify == nil { + r.Notify = func(partNumber int64, ret *UploadPartsRet) {} + } + if r.NotifyErr == nil { + r.NotifyErr = func(partNumber int64, err error) {} + } +} + +type blkputRets []BlkputRet + +func (rets blkputRets) Len() int { + return len(rets) +} + +func (rets blkputRets) Less(i, j int) bool { + return rets[i].blkIdx < rets[j].blkIdx +} + +func (rets blkputRets) Swap(i, j int) { + rets[i], rets[j] = rets[j], rets[i] +} + +type uploadPartInfos []UploadPartInfo + +func (infos uploadPartInfos) Len() int { + return len(infos) +} + +func (infos uploadPartInfos) Less(i, j int) bool { + return infos[i].PartNumber < infos[j].PartNumber +} + +func (infos uploadPartInfos) Swap(i, j int) { + infos[i], infos[j] = infos[j], infos[i] +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_base.go b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_base.go new file mode 100644 index 0000000..f6f2e3a --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_base.go @@ -0,0 +1,287 @@ +package storage + +import ( + "bytes" + "context" + "errors" + "io" + "sync" + + api "github.com/qiniu/go-sdk/v7" +) + +// 分片上传过程中可能遇到的错误 +var ErrUnmatchedChecksum = errors.New("unmatched checksum") + +const ( + // 获取下一个分片Reader失败 + ErrNextReader = "ErrNextReader" + // 超过了最大的重试上传次数 + ErrMaxUpRetry = "ErrMaxUpRetry" +) + +const ( + blockBits = 22 + blockMask = (1 << blockBits) - 1 +) + +// Settings 为分片上传设置 +type Settings struct { + TaskQsize int // 可选。任务队列大小。为 0 表示取 Workers * 4。 + Workers int // 并行 Goroutine 数目。 + ChunkSize int // 默认的Chunk大小,不设定则为4M(仅在分片上传 v1 中使用) + PartSize int64 // 默认的Part大小,不设定则为4M(仅在分片上传 v2 中使用) + TryTimes int // 默认的尝试次数,不设定则为3 +} + +// 分片上传默认参数设置 +const ( + defaultWorkers = 4 // 默认的并发上传的块数量 + defaultChunkSize = 4 * 1024 * 1024 // 默认的分片大小,4MB(仅在分片上传 v1 中使用) + defaultPartSize = 4 * 1024 * 1024 // 默认的分片大小,4MB(仅在分片上传 v2 中使用) + defaultTryTimes = 3 // mkblk / bput / uploadParts 失败重试次数 +) + +// 分片上传的默认设置 +var settings = Settings{ + TaskQsize: defaultWorkers * 4, + Workers: defaultWorkers, + ChunkSize: defaultChunkSize, + PartSize: defaultPartSize, + TryTimes: defaultTryTimes, +} + +// SetSettings 可以用来设置分片上传参数 +func SetSettings(v *Settings) { + settings = *v + if settings.Workers == 0 { + settings.Workers = defaultWorkers + } + if settings.TaskQsize == 0 { + settings.TaskQsize = settings.Workers * 4 + } + if settings.ChunkSize == 0 { + settings.ChunkSize = defaultChunkSize + } + if settings.PartSize == 0 { + settings.PartSize = defaultPartSize + } + if settings.TryTimes == 0 { + settings.TryTimes = defaultTryTimes + } +} + +var ( + tasks chan func() + once sync.Once +) + +func worker(tasks chan func()) { + for task := range tasks { + task() + } +} + +func _initWorkers() { + tasks = make(chan func(), settings.TaskQsize) + for i := 0; i < settings.Workers; i++ { + go worker(tasks) + } +} + +func initWorkers() { + once.Do(_initWorkers) +} + +// 代表一块分片的基本信息 +type ( + chunk struct { + id int64 + offset int64 + size int64 + reader io.ReaderAt + retried int + } + + // 代表一块分片的上传错误信息 + chunkError struct { + chunk + err error + } + + // 通用分片上传接口,同时适用于分片上传 v1 和 v2 接口 + resumeUploaderBase interface { + // 开始上传前调用一次用于初始化,在 v1 中该接口不做任何事情,而在 v2 中该接口对应 initParts + initUploader(context.Context) ([]int64, error) + // 上传实际的分片数据,允许并发上传。在 v1 中该接口对应 mkblk 和 bput 组合,而在 v2 中该接口对应 uploadParts + 
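+ // Implementations must be safe for concurrent use: uploadByWorkers invokes this method from multiple worker goroutines at once.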
uploadChunk(context.Context, chunk) error + // 上传所有分片后调用一次用于结束上传,在 v1 中该接口对应 mkfile,而在 v2 中该接口对应 completeParts + final(context.Context) error + } + + // 将已知数据流大小的情况和未知数据流大小的情况抽象成一个接口 + chunkReader interface { + readChunks([]int64, func(chunkID, off, size int64, reader io.ReaderAt) error) error + } + + // 已知数据流大小的情况下读取数据流 + unsizedChunkReader struct { + body io.Reader + blockSize int64 + } + + // 未知数据流大小的情况下读取数据流 + sizedChunkReader struct { + body io.ReaderAt + totalSize int64 + blockSize int64 + } +) + +// 使用并发 Goroutine 上传数据 +func uploadByWorkers(uploader resumeUploaderBase, ctx context.Context, body chunkReader, tryTimes int) (err error) { + var ( + wg sync.WaitGroup + failedChunks sync.Map + recovered []int64 + ) + + initWorkers() + + if recovered, err = uploader.initUploader(ctx); err != nil { + return + } + + // 读取 Chunk 并创建任务 + err = body.readChunks(recovered, func(chunkID, off, size int64, reader io.ReaderAt) error { + newChunk := chunk{id: chunkID, offset: off, size: size, reader: reader, retried: 0} + wg.Add(1) + tasks <- func() { + defer wg.Done() + if err := uploader.uploadChunk(ctx, newChunk); err != nil { + newChunk.retried += 1 + failedChunks.LoadOrStore(newChunk.id, chunkError{chunk: newChunk, err: err}) + } + } + return nil + }) + if err != nil { + return err + } + + for { + // 等待这一轮任务执行完毕 + wg.Wait() + // 不断重试先前失败的任务 + failedTasks := 0 + failedChunks.Range(func(key, chunkValue interface{}) bool { + chunkErr := chunkValue.(chunkError) + if chunkErr.err == context.Canceled { + err = chunkErr.err + return false + } + + failedChunks.Delete(key) + if chunkErr.retried < tryTimes { + failedTasks += 1 + wg.Add(1) + tasks <- func() { + defer wg.Done() + if cerr := uploader.uploadChunk(ctx, chunkErr.chunk); cerr != nil { + chunkErr.retried += 1 + failedChunks.LoadOrStore(chunkErr.id, chunkErr) + } + } + return true + } else { + err = chunkErr.err + return false + } + }) + if err != nil { + err = api.NewError(ErrMaxUpRetry, err.Error()) + return + } else if failedTasks == 0 { + break + } + } + + err = uploader.final(ctx) + return +} + +func newUnsizedChunkReader(body io.Reader, blockSize int64) *unsizedChunkReader { + return &unsizedChunkReader{body: body, blockSize: blockSize} +} + +func (r *unsizedChunkReader) readChunks(recovered []int64, f func(chunkID, off, size int64, reader io.ReaderAt) error) error { + var ( + lastChunk = false + chunkID int64 = 0 + off int64 = 0 + chunkSize int + err error + recoveredMap = make(map[int64]struct{}, len(recovered)) + ) + + for _, roff := range recovered { + recoveredMap[roff] = struct{}{} + } + + for !lastChunk { + buf := make([]byte, r.blockSize) + if chunkSize, err = io.ReadFull(r.body, buf); err != nil { + switch err { + case io.EOF: + lastChunk = true + return nil + case io.ErrUnexpectedEOF: + buf = buf[:chunkSize] + lastChunk = true + default: + return api.NewError(ErrNextReader, err.Error()) + } + } + + if _, ok := recoveredMap[off]; !ok { + if err = f(chunkID, off, int64(len(buf)), bytes.NewReader(buf)); err != nil { + return err + } + } + chunkID += 1 + off += int64(chunkSize) + } + return nil +} + +func newSizedChunkReader(body io.ReaderAt, totalSize, blockSize int64) *sizedChunkReader { + return &sizedChunkReader{body: body, totalSize: totalSize, blockSize: blockSize} +} + +func (r *sizedChunkReader) readChunks(recovered []int64, f func(chunkID, off, size int64, reader io.ReaderAt) error) error { + var ( + chunkID int64 = 0 + off int64 = 0 + err error + recoveredMap = make(map[int64]struct{}, len(recovered)) + ) + + for _, roff := 
+
+func newUnsizedChunkReader(body io.Reader, blockSize int64) *unsizedChunkReader {
+	return &unsizedChunkReader{body: body, blockSize: blockSize}
+}
+
+func (r *unsizedChunkReader) readChunks(recovered []int64, f func(chunkID, off, size int64, reader io.ReaderAt) error) error {
+	var (
+		lastChunk          = false
+		chunkID      int64 = 0
+		off          int64 = 0
+		chunkSize    int
+		err          error
+		recoveredMap = make(map[int64]struct{}, len(recovered))
+	)
+
+	for _, roff := range recovered {
+		recoveredMap[roff] = struct{}{}
+	}
+
+	for !lastChunk {
+		buf := make([]byte, r.blockSize)
+		if chunkSize, err = io.ReadFull(r.body, buf); err != nil {
+			switch err {
+			case io.EOF:
+				lastChunk = true
+				return nil
+			case io.ErrUnexpectedEOF:
+				buf = buf[:chunkSize]
+				lastChunk = true
+			default:
+				return api.NewError(ErrNextReader, err.Error())
+			}
+		}
+
+		if _, ok := recoveredMap[off]; !ok {
+			if err = f(chunkID, off, int64(len(buf)), bytes.NewReader(buf)); err != nil {
+				return err
+			}
+		}
+		chunkID += 1
+		off += int64(chunkSize)
+	}
+	return nil
+}
+
+func newSizedChunkReader(body io.ReaderAt, totalSize, blockSize int64) *sizedChunkReader {
+	return &sizedChunkReader{body: body, totalSize: totalSize, blockSize: blockSize}
+}
+
+func (r *sizedChunkReader) readChunks(recovered []int64, f func(chunkID, off, size int64, reader io.ReaderAt) error) error {
+	var (
+		chunkID      int64 = 0
+		off          int64 = 0
+		err          error
+		recoveredMap = make(map[int64]struct{}, len(recovered))
+	)
+
+	for _, roff := range recovered {
+		recoveredMap[roff] = struct{}{}
+	}
+
+	for off < r.totalSize {
+		shouldRead := r.totalSize - off
+		if shouldRead > r.blockSize {
+			shouldRead = r.blockSize
+		}
+		if _, ok := recoveredMap[off]; !ok {
+			if err = f(chunkID, off, shouldRead, io.NewSectionReader(r.body, off, shouldRead)); err != nil {
+				return err
+			}
+		}
+		off += shouldRead
+		chunkID += 1
+	}
+	return nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_deprecated.go b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_deprecated.go
new file mode 100644
index 0000000..26a3bb4
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_deprecated.go
@@ -0,0 +1,91 @@
+package storage
+
+import (
+	"errors"
+	"io"
+
+	"github.com/qiniu/go-sdk/v7"
+)
+
+// 分片上传过程中可能遇到的错误
+var (
+	ErrInvalidPutProgress = errors.New("invalid put progress")
+	ErrPutFailed          = errors.New("resumable put failed")
+	ErrBadToken           = errors.New("invalid token")
+)
+
+const (
+	// 上传一个分片失败
+	ErrUploadChunkFailed = "ErrUploadChunkFailed"
+	// 取消了分片的上传
+	ErrChunkUpCanceled = "ErrChunkUpCanceled"
+)
+
+// BlockCount 用来计算文件的分块数量
+func BlockCount(fsize int64) int {
+	return int((fsize + blockMask) >> blockBits)
+}
+
+// 上传进度过期错误
+const (
+	InvalidCtx = 701 // UP: 无效的上下文(bput),可能情况:Ctx非法或者已经被淘汰(太久未使用)
+)
+
+// ChunkPutRetryer 上传分片失败时候重试接口
+type ChunkPutRetryer interface {
+	Retry(ck *Chunk)
+}
+
+// Chunk 表示要上传的数据块, 该片的大小不能大于4M
+// 上传块的过程: 1. 调用接口在七牛后端创建块 2. 上传数据到该块
+// 详细可以参考 https://developer.qiniu.com/kodo/api/1286/mkblk
+type Chunk struct {
+	// 要上传的块数据
+	Body io.ReadSeeker
+
+	// 该片数据所属的块的大小
+	BlkSize int
+
+	// 将大的数据切成4M一块,每个块都有一个index
+	// 该片数据所属的块的index
+	Index int
+
+	// 上传该片发生的错误
+	Err error
+
+	// 是否调用了mkblk接口在后台创建了该片所属的块
+	Created bool
+
+	// 上传块的返回值
+	Ret *BlkputRet
+}
+
+// ShouldRetry 是否需要重新上传
+func (b *Chunk) ShouldRetry() bool {
+	return b.Err != nil
+}
+
+// ChunkLength 返回实际要上传的数据的长度
+func (b *Chunk) ChunkLength() (int, error) {
+	n, err := api.SeekerLen(b.Body)
+	if err != nil {
+		return 0, err
+	}
+	return int(n), nil
+}
+
+// ResetBody 重置Body到开头
+func (b *Chunk) ResetBody() error {
+	_, err := b.Body.Seek(0, io.SeekStart)
+	return err
+}
+
+// Reset 重置Body和Err
+func (b *Chunk) Reset() error {
+	err := b.ResetBody()
+	if err != nil {
+		return err
+	}
+	b.Err = nil
+	return nil
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_v2.go b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_v2.go
new file mode 100644
index 0000000..6853f12
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/resume_uploader_v2.go
@@ -0,0 +1,397 @@
+package storage
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/json"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+
+	"github.com/qiniu/go-sdk/v7/client"
+)
+
+// ResumeUploaderV2 表示一个分片上传 v2 的对象
+type ResumeUploaderV2 struct {
+	Client *client.Client
+	Cfg    *Config
+}
+
+// NewResumeUploaderV2 表示构建一个新的分片上传 v2 的对象
+func NewResumeUploaderV2(cfg *Config) *ResumeUploaderV2 {
+	return NewResumeUploaderV2Ex(cfg, nil)
+}
+
+// NewResumeUploaderV2Ex 表示构建一个新的分片上传 v2 的对象
+func NewResumeUploaderV2Ex(cfg *Config, clt *client.Client) *ResumeUploaderV2 {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+
+	if clt == nil {
+		clt = &client.DefaultClient
+	}
+
+	return &ResumeUploaderV2{
+		Client: clt,
+		Cfg:    cfg,
+	}
+}
+
+// Put 方法用来上传一个文件,支持断点续传和分块上传。
+//
+// ctx 是请求的上下文。
+// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。
+// upToken 是由业务服务器颁发的上传凭证。
+// key
是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 接口,而不是 io.Reader。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。详细见 RputV2Extra 结构的描述。 +// +func (p *ResumeUploaderV2) Put(ctx context.Context, ret interface{}, upToken string, key string, f io.ReaderAt, fsize int64, extra *RputV2Extra) error { + return p.rput(ctx, ret, upToken, key, true, f, fsize, nil, extra) +} + +func (p *ResumeUploaderV2) PutWithoutSize(ctx context.Context, ret interface{}, upToken, key string, r io.Reader, extra *RputV2Extra) error { + return p.rputWithoutSize(ctx, ret, upToken, key, true, r, extra) +} + +// PutWithoutKey 方法用来上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 +// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 接口,而不是 io.Reader。 +// fsize 是要上传的文件大小。 +// extra 是上传的一些可选项。详细见 RputV2Extra 结构的描述。 +// +func (p *ResumeUploaderV2) PutWithoutKey(ctx context.Context, ret interface{}, upToken string, f io.ReaderAt, fsize int64, extra *RputV2Extra) error { + return p.rput(ctx, ret, upToken, "", false, f, fsize, nil, extra) +} + +// PutFile 用来上传一个文件,支持断点续传和分块上传。 +// 和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项。详细见 RputV2Extra 结构的描述。 +// +func (p *ResumeUploaderV2) PutFile(ctx context.Context, ret interface{}, upToken, key, localFile string, extra *RputV2Extra) error { + return p.rputFile(ctx, ret, upToken, key, true, localFile, extra) +} + +// PutFileWithoutKey 上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 +// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 +// 和 PutWithoutKey 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 +// +// ctx 是请求的上下文。 +// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 +// upToken 是由业务服务器颁发的上传凭证。 +// localFile 是要上传的文件的本地路径。 +// extra 是上传的一些可选项。详细见 RputV2Extra 结构的描述。 +// +func (p *ResumeUploaderV2) PutFileWithoutKey(ctx context.Context, ret interface{}, upToken, localFile string, extra *RputV2Extra) error { + return p.rputFile(ctx, ret, upToken, "", false, localFile, extra) +} + +func (p *ResumeUploaderV2) rput(ctx context.Context, ret interface{}, upToken string, key string, hasKey bool, f io.ReaderAt, fsize int64, fileDetails *fileDetailsInfo, extra *RputV2Extra) (err error) { + if extra == nil { + extra = &RputV2Extra{} + } + extra.init() + + var ( + upHost, accessKey, bucket, recorderKey string + fileInfo os.FileInfo = nil + ) + + if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + return + } + if extra.UpHost != "" { + upHost = extra.getUpHost(p.Cfg.UseHTTPS) + } else { + upHost, err = p.resumeUploaderAPIs().upHost(accessKey, bucket) + if err != nil { + return + } + } + + if extra.Recorder != nil && fileDetails != nil { + recorderKey = extra.Recorder.GenerateRecorderKey( + []string{accessKey, bucket, key, "v2", fileDetails.fileFullPath, strconv.FormatInt(extra.PartSize, 10)}, + fileDetails.fileInfo) + fileInfo = fileDetails.fileInfo + } + + return uploadByWorkers( + newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, upHost, fileInfo, extra, ret, recorderKey), + ctx, newSizedChunkReader(f, 
fsize, extra.PartSize), extra.TryTimes) +} + +func (p *ResumeUploaderV2) rputWithoutSize(ctx context.Context, ret interface{}, upToken string, key string, hasKey bool, r io.Reader, extra *RputV2Extra) (err error) { + if extra == nil { + extra = &RputV2Extra{} + } + extra.init() + + var accessKey, bucket, upHost string + if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + return + } + if extra.UpHost != "" { + upHost = extra.getUpHost(p.Cfg.UseHTTPS) + } else { + upHost, err = p.resumeUploaderAPIs().upHost(accessKey, bucket) + if err != nil { + return + } + } + + return uploadByWorkers( + newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, upHost, nil, extra, ret, ""), + ctx, newUnsizedChunkReader(r, extra.PartSize), extra.TryTimes) +} + +func (p *ResumeUploaderV2) rputFile(ctx context.Context, ret interface{}, upToken string, key string, hasKey bool, localFile string, extra *RputV2Extra) (err error) { + var ( + file *os.File + fileInfo os.FileInfo + fileDetails *fileDetailsInfo + ) + + if file, err = os.Open(localFile); err != nil { + return + } + defer file.Close() + + if fileInfo, err = file.Stat(); err != nil { + return + } + + if fullPath, absErr := filepath.Abs(file.Name()); absErr == nil { + fileDetails = &fileDetailsInfo{fileFullPath: fullPath, fileInfo: fileInfo} + } + + return p.rput(ctx, ret, upToken, key, hasKey, file, fileInfo.Size(), fileDetails, extra) +} + +// 初始化块请求 +func (p *ResumeUploaderV2) InitParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, ret *InitPartsRet) error { + return p.resumeUploaderAPIs().initParts(ctx, upToken, upHost, bucket, key, hasKey, ret) +} + +// 发送块请求 +func (p *ResumeUploaderV2) UploadParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, uploadId string, partNumber int64, partMD5 string, ret *UploadPartsRet, body io.Reader, size int) error { + return p.resumeUploaderAPIs().uploadParts(ctx, upToken, upHost, bucket, key, hasKey, uploadId, partNumber, partMD5, ret, body, int64(size)) +} + +// 完成块请求 +func (p *ResumeUploaderV2) CompleteParts(ctx context.Context, upToken, upHost string, ret interface{}, bucket, key string, hasKey bool, uploadId string, extra *RputV2Extra) (err error) { + return p.resumeUploaderAPIs().completeParts(ctx, upToken, upHost, ret, bucket, key, hasKey, uploadId, extra) +} + +func (p *ResumeUploaderV2) UpHost(ak, bucket string) (upHost string, err error) { + return p.resumeUploaderAPIs().upHost(ak, bucket) +} + +func (p *ResumeUploaderV2) resumeUploaderAPIs() *resumeUploaderAPIs { + return &resumeUploaderAPIs{Client: p.Client, Cfg: p.Cfg} +} + +type ( + // 用于实现 resumeUploaderBase 的 V2 分片接口 + resumeUploaderV2Impl struct { + client *client.Client + cfg *Config + bucket string + key string + hasKey bool + uploadId string + upToken string + upHost string + extra *RputV2Extra + fileInfo os.FileInfo + recorderKey string + ret interface{} + lock sync.Mutex + bufPool *sync.Pool + } + + resumeUploaderV2RecoveryInfoContext struct { + Offset int64 `json:"o"` + Etag string `json:"e"` + PartSize int `json:"s"` + PartNumber int64 `json:"p"` + } + + resumeUploaderV2RecoveryInfo struct { + FileSize int64 `json:"s"` + ModTimeStamp int64 `json:"m"` + UploadId string `json:"i"` + Contexts []resumeUploaderV2RecoveryInfoContext `json:"c"` + } +) + +func newResumeUploaderV2Impl(resumeUploader *ResumeUploaderV2, bucket, key string, hasKey bool, upToken string, upHost string, fileInfo os.FileInfo, extra *RputV2Extra, ret interface{}, recorderKey string) 
*resumeUploaderV2Impl { + return &resumeUploaderV2Impl{ + client: resumeUploader.Client, + cfg: resumeUploader.Cfg, + bucket: bucket, + key: key, + hasKey: hasKey, + upToken: upToken, + upHost: upHost, + fileInfo: fileInfo, + recorderKey: recorderKey, + extra: extra, + ret: ret, + bufPool: &sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, extra.PartSize)) + }, + }, + } +} + +func (impl *resumeUploaderV2Impl) initUploader(ctx context.Context) ([]int64, error) { + var ( + recovered []int64 + ret InitPartsRet + ) + + if impl.extra.Recorder != nil { + if recorderData, err := impl.extra.Recorder.Get(impl.recorderKey); err == nil { + if recovered = impl.recover(ctx, recorderData); len(recovered) > 0 { + return recovered, nil + } + } + } + + err := impl.resumeUploaderAPIs().initParts(ctx, impl.upToken, impl.upHost, impl.bucket, impl.key, impl.hasKey, &ret) + if err == nil { + impl.uploadId = ret.UploadID + } + return nil, err +} + +func (impl *resumeUploaderV2Impl) uploadChunk(ctx context.Context, c chunk) error { + var ( + apis = impl.resumeUploaderAPIs() + ret UploadPartsRet + chunkSize int64 + buffer = impl.bufPool.Get().(*bytes.Buffer) + err error + ) + defer impl.bufPool.Put(buffer) + buffer.Reset() + + partNumber := c.id + 1 + hasher := md5.New() + + chunkSize, err = io.Copy(hasher, io.TeeReader(io.NewSectionReader(c.reader, 0, c.size), buffer)) + if err != nil { + impl.extra.NotifyErr(partNumber, err) + return err + } else if chunkSize == 0 { + return nil + } + + md5Value := hex.EncodeToString(hasher.Sum(nil)) + + err = apis.uploadParts(ctx, impl.upToken, impl.upHost, impl.bucket, impl.key, impl.hasKey, impl.uploadId, + partNumber, md5Value, &ret, buffer, chunkSize) + + if err != nil { + impl.extra.NotifyErr(partNumber, err) + } else { + impl.extra.Notify(partNumber, &ret) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + func() { + impl.lock.Lock() + defer impl.lock.Unlock() + impl.extra.Progresses = append(impl.extra.Progresses, UploadPartInfo{ + Etag: ret.Etag, PartNumber: partNumber, partSize: int(chunkSize), fileOffset: c.offset, + }) + impl.save(ctx) + }() + } + return err +} + +func (impl *resumeUploaderV2Impl) final(ctx context.Context) error { + if impl.extra.Recorder != nil { + impl.extra.Recorder.Delete(impl.recorderKey) + } + + sort.Sort(uploadPartInfos(impl.extra.Progresses)) + return impl.resumeUploaderAPIs().completeParts(ctx, impl.upToken, impl.upHost, impl.ret, impl.bucket, impl.key, impl.hasKey, impl.uploadId, impl.extra) +} + +func (impl *resumeUploaderV2Impl) recover(ctx context.Context, recoverData []byte) (recovered []int64) { + var recoveryInfo resumeUploaderV2RecoveryInfo + if err := json.Unmarshal(recoverData, &recoveryInfo); err != nil { + return + } + if impl.fileInfo == nil || recoveryInfo.FileSize != impl.fileInfo.Size() || recoveryInfo.ModTimeStamp != impl.fileInfo.ModTime().UnixNano() { + return + } + impl.uploadId = recoveryInfo.UploadId + + for _, c := range recoveryInfo.Contexts { + impl.extra.Progresses = append(impl.extra.Progresses, UploadPartInfo{ + Etag: c.Etag, PartNumber: c.PartNumber, fileOffset: c.Offset, partSize: c.PartSize, + }) + recovered = append(recovered, int64(c.Offset)) + } + + return +} + +func (impl *resumeUploaderV2Impl) save(ctx context.Context) { + var ( + recoveryInfo resumeUploaderV2RecoveryInfo + recoveredData []byte + err error + ) + + if impl.fileInfo == nil || impl.extra.Recorder == nil { + return + } + + recoveryInfo.FileSize = impl.fileInfo.Size() + 
recoveryInfo.ModTimeStamp = impl.fileInfo.ModTime().UnixNano() + recoveryInfo.UploadId = impl.uploadId + recoveryInfo.Contexts = make([]resumeUploaderV2RecoveryInfoContext, 0, len(impl.extra.Progresses)) + + for _, progress := range impl.extra.Progresses { + recoveryInfo.Contexts = append(recoveryInfo.Contexts, resumeUploaderV2RecoveryInfoContext{ + Offset: progress.fileOffset, Etag: progress.Etag, PartSize: progress.partSize, PartNumber: progress.PartNumber, + }) + } + + if recoveredData, err = json.Marshal(recoveryInfo); err != nil { + return + } + + impl.extra.Recorder.Set(impl.recorderKey, recoveredData) +} + +func (impl *resumeUploaderV2Impl) resumeUploaderAPIs() *resumeUploaderAPIs { + return &resumeUploaderAPIs{Client: impl.client, Cfg: impl.cfg} +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/token.go b/vendor/github.com/qiniu/go-sdk/v7/storage/token.go new file mode 100644 index 0000000..4311bc6 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/token.go @@ -0,0 +1,156 @@ +package storage + +import ( + "encoding/base64" + "encoding/json" + "errors" + "strings" + "time" + + "github.com/qiniu/go-sdk/v7/auth" +) + +// PutPolicy 表示文件上传的上传策略,参考 https://developer.qiniu.com/kodo/manual/1206/put-policy +type PutPolicy struct { + + // 指定上传的目标资源空间 Bucket 和资源键 Key(最大为 750 字节)。有三种格式: + // ,表示允许用户上传文件到指定的 bucket。在这种格式下文件只能新增(分片上传 v1 版 需要指定 insertOnly 为 1 才是新增,否则也为覆盖上传),若已存在同名资源(且文件内容/etag不一致),上传会失败;若已存在资源的内容/etag一致,则上传会返回成功。 + // :,表示只允许用户上传指定 key 的文件。在这种格式下文件默认允许修改,若已存在同名资源则会被覆盖。如果只希望上传指定 key 的文件,并且不允许修改,那么可以将下面的 insertOnly 属性值设为 1。 + // :,表示只允许用户上传指定以 keyPrefix 为前缀的文件,当且仅当 isPrefixalScope 字段为 1 时生效,isPrefixalScope 为 1 时无法覆盖上传。 + Scope string `json:"scope"` + + // 若为 1,表示允许用户上传以 scope 的 keyPrefix 为前缀的文件。 + IsPrefixalScope int `json:"isPrefixalScope,omitempty"` + + // 上传凭证有效截止时间。Unix时间戳,单位为秒。该截止时间为上传完成后,在七牛空间生成文件的校验时间,而非上传的开始时间, + // 一般建议设置为上传开始时间 + 3600s,用户可根据具体的业务场景对凭证截止时间进行调整。 + Expires uint64 `json:"deadline"` + + // 若非0, 即使Scope为 Bucket:Key 的形式也是insert only + InsertOnly uint16 `json:"insertOnly,omitempty"` + + // 唯一属主标识。特殊场景下非常有用,例如根据 App-Client 标识给图片或视频打水印。 + EndUser string `json:"endUser,omitempty"` + + // Web 端文件上传成功后,浏览器执行 303 跳转的 URL。通常用于表单上传。 + // 文件上传成功后会跳转到 ?upload_ret=包含 returnBody 内容。 + // 如不设置 returnUrl,则直接将 returnBody 的内容返回给客户端。 + ReturnURL string `json:"returnUrl,omitempty"` + + // 上传成功后,自定义七牛云最终返回給上传端(在指定 returnUrl 时是携带在跳转路径参数中)的数据。支持魔法变量和自定义变量。 + // returnBody 要求是合法的 JSON 文本。 + // 例如 {“key”: $(key), “hash”: $(etag), “w”: $(imageInfo.width), “h”: $(imageInfo.height)}。 + ReturnBody string `json:"returnBody,omitempty"` + + // 上传成功后,七牛云向业务服务器发送 POST 请求的 URL。必须是公网上可以正常进行 POST 请求并能响应 HTTP/1.1 200 OK 的有效 URL。 + // 另外,为了给客户端有一致的体验,我们要求 callbackUrl 返回包 Content-Type 为 “application/json”,即返回的内容必须是合法的 + // JSON 文本。出于高可用的考虑,本字段允许设置多个 callbackUrl(用英文符号 ; 分隔),在前一个 callbackUrl 请求失败的时候会依次 + // 重试下一个 callbackUrl。一个典型例子是:http:///callback;http:///callback,并同时指定下面的 callbackHost 字段。 + // 在 callbackUrl 中使用 ip 的好处是减少对 dns 解析的依赖,可改善回调的性能和稳定性。指定 callbackUrl,必须指定 callbackbody, + // 且值不能为空。 + CallbackURL string `json:"callbackUrl,omitempty"` + + // 上传成功后,七牛云向业务服务器发送回调通知时的 Host 值。与 callbackUrl 配合使用,仅当设置了 callbackUrl 时才有效。 + CallbackHost string `json:"callbackHost,omitempty"` + + // 上传成功后,七牛云向业务服务器发送 Content-Type: application/x-www-form-urlencoded 的 POST 请求。业务服务器可以通过直接读取 + // 请求的 query 来获得该字段,支持魔法变量和自定义变量。callbackBody 要求是合法的 url query string。 + // 例如key=$(key)&hash=$(etag)&w=$(imageInfo.width)&h=$(imageInfo.height)。如果callbackBodyType指定为application/json, + // 
则callbackBody应为json格式,例如:{“key”:"$(key)",“hash”:"$(etag)",“w”:"$(imageInfo.width)",“h”:"$(imageInfo.height)"}。 + CallbackBody string `json:"callbackBody,omitempty"` + + // 上传成功后,七牛云向业务服务器发送回调通知 callbackBody 的 Content-Type。默认为 application/x-www-form-urlencoded,也可设置 + // 为 application/json。 + CallbackBodyType string `json:"callbackBodyType,omitempty"` + + // 资源上传成功后触发执行的预转持久化处理指令列表。fileType=2或3(上传归档存储或深度归档存储文件)时,不支持使用该参数。支持魔法变量和自 + // 定义变量。每个指令是一个 API 规格字符串,多个指令用;分隔。请参阅persistenOps详解与示例。同时添加 persistentPipeline 字段,使用专 + // 用队列处理,请参阅persistentPipeline。 + PersistentOps string `json:"persistentOps,omitempty"` + + // 接收持久化处理结果通知的 URL。必须是公网上可以正常进行 POST 请求并能响应 HTTP/1.1 200 OK 的有效 URL。该 URL 获取的内容和持久化处 + // 理状态查询的处理结果一致。发送 body 格式是 Content-Type 为 application/json 的 POST 请求,需要按照读取流的形式读取请求的 body + // 才能获取。 + PersistentNotifyURL string `json:"persistentNotifyUrl,omitempty"` + + // 转码队列名。资源上传成功后,触发转码时指定独立的队列进行转码。为空则表示使用公用队列,处理速度比较慢。建议使用专用队列。 + PersistentPipeline string `json:"persistentPipeline,omitempty"` + + // saveKey 的优先级设置。为 true 时,saveKey不能为空,会忽略客户端指定的key,强制使用saveKey进行文件命名。参数不设置时, + // 默认值为false + ForceSaveKey bool `json:"forceSaveKey,omitempty"` // + + // 自定义资源名。支持魔法变量和自定义变量。forceSaveKey 为false时,这个字段仅当用户上传的时候没有主动指定 key 时起作用; + // forceSaveKey 为true时,将强制按这个字段的格式命名。 + SaveKey string `json:"saveKey,omitempty"` + + // 限定上传文件大小最小值,单位Byte。小于限制上传文件大小的最小值会被判为上传失败,返回 403 状态码 + FsizeMin int64 `json:"fsizeMin,omitempty"` + + // 限定上传文件大小最大值,单位Byte。超过限制上传文件大小的最大值会被判为上传失败,返回 413 状态码。 + FsizeLimit int64 `json:"fsizeLimit,omitempty"` + + // 开启 MimeType 侦测功能,并按照下述规则进行侦测;如不能侦测出正确的值,会默认使用 application/octet-stream 。 + // 设为非 0 值,则忽略上传端传递的文件 MimeType 信息,并按如下顺序侦测 MimeType 值: + // 1. 侦测内容; 2. 检查文件扩展名; 3. 检查 Key 扩展名。 + // 默认设为 0 值,如上传端指定了 MimeType 则直接使用该值,否则按如下顺序侦测 MimeType 值: + // 1. 检查文件扩展名; 2. 检查 Key 扩展名; 3. 侦测内容。 + DetectMime uint8 `json:"detectMime,omitempty"` + + // 限定用户上传的文件类型。指定本字段值,七牛服务器会侦测文件内容以判断 MimeType,再用判断值跟指定值进行匹配,匹配成功则允许上传,匹配失败则返回 403 状态码。示例: + // image/* 表示只允许上传图片类型 + // image/jpeg;image/png 表示只允许上传 jpg 和 png 类型的图片 + // !application/json;text/plain 表示禁止上传 json 文本和纯文本。注意最前面的感叹号! 
+	MimeLimit string `json:"mimeLimit,omitempty"`
+
+	// 资源的存储类型,0 表示标准存储,1 表示低频存储,2 表示归档存储,3 表示深度归档存储。
+	FileType int `json:"fileType,omitempty"`
+
+	CallbackFetchKey uint8 `json:"callbackFetchKey,omitempty"`
+
+	DeleteAfterDays int `json:"deleteAfterDays,omitempty"`
+}
+
+// UploadToken 方法用来进行上传凭证的生成
+// 该方法生成的过期时间是相对于当前时间的
+func (p *PutPolicy) UploadToken(cred *auth.Credentials) string {
+	return p.uploadToken(cred)
+}
+
+func (p PutPolicy) uploadToken(cred *auth.Credentials) (token string) {
+	if p.Expires == 0 {
+		p.Expires = 3600 // 默认一小时过期
+	}
+	p.Expires += uint64(time.Now().Unix())
+	putPolicyJSON, _ := json.Marshal(p)
+	token = cred.SignWithData(putPolicyJSON)
+	return
+}
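+
+// Editor's sketch: issuing a two-hour upload token scoped to a single key.
+// The credentials, bucket and key below are placeholder assumptions.
+func exampleUploadToken(cred *auth.Credentials) string {
+	putPolicy := PutPolicy{
+		Scope:   "mybucket:foo/bar.jpg", // bucket:key form restricts the token to one key
+		Expires: 7200,                   // lifetime in seconds, relative to now
+	}
+	return putPolicy.UploadToken(cred)
+}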
+
+func getAkBucketFromUploadToken(token string) (ak, bucket string, err error) {
+	items := strings.Split(token, ":")
+	// KODO-11919
+	if len(items) == 5 && items[0] == "" {
+		items = items[2:]
+	} else if len(items) != 3 {
+		err = errors.New("invalid upload token, format error")
+		return
+	}
+
+	ak = items[0]
+	policyBytes, dErr := base64.URLEncoding.DecodeString(items[2])
+	if dErr != nil {
+		err = errors.New("invalid upload token, invalid put policy")
+		return
+	}
+
+	putPolicy := PutPolicy{}
+	uErr := json.Unmarshal(policyBytes, &putPolicy)
+	if uErr != nil {
+		err = errors.New("invalid upload token, invalid put policy")
+		return
+	}
+
+	bucket = strings.Split(putPolicy.Scope, ":")[0]
+	return
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go b/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go
new file mode 100644
index 0000000..1e1a500
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go
@@ -0,0 +1,595 @@
+// package storage 提供了用户存储配置(uc)方面的功能, 定义了UC API 的返回结构体类型
+package storage
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/qiniu/go-sdk/v7/auth"
+)
+
+// BucketSummary 存储空间信息
+type BucketSummary struct {
+	// 存储空间名字
+	Name string     `json:"name"`
+	Info BucketInfo `json:"info"`
+}
+
+// BucketInfo 存储空间的详细信息
+type BucketInfo struct {
+	// 镜像回源地址, 接口返回的多个地址以;分割
+	Source string `json:"source"`
+
+	// 镜像回源的时候请求头中的HOST
+	Host string `json:"host"`
+
+	// 镜像回源地址过期时间(秒数), 现在这个功能没有实现,因此这个字段现在是没有意义的
+	Expires int `json:"expires"`
+
+	// 是否开启了原图保护
+	Protected int `json:"protected"`
+
+	// 是否是私有空间
+	Private int `json:"private"`
+
+	// 如果NoIndexPage是false表示开启了空间根目录index.html
+	// 如果是true, 表示没有开启
+	// 开启了根目录下的index.html, 文件将会被作为默认首页展示
+	NoIndexPage int `json:"no_index_page"`
+
+	// 在规定的时效内使客户端缓存更新的效果
+	MaxAge int `json:"max_age"`
+
+	// 图片样式分隔符, 接口返回的可能有多个
+	Separator string `json:"separator"`
+
+	// 图片样式, map中的key表示图片样式命令名字
+	// map中的value表示图片样式命令的内容
+	Styles map[string]string `json:"styles"`
+
+	// 防盗链模式
+	// 1 - 表示设置了防盗链的referer白名单
+	// 2 - 表示设置了防盗链的referer黑名单
+	AntiLeechMode int `json:"anti_leech_mode"`
+
+	// 使用token签名进行防盗链
+	// 0 - 表示关闭
+	// 1 - 表示开启
+	TokenAntiLeechMode int `json:"token_anti_leech"`
+
+	// 防盗链referer白名单列表
+	ReferWl []string `json:"refer_wl"`
+
+	// 防盗链referer黑名单列表
+	ReferBl []string `json:"refer_bl"`
+
+	// 在源站支持的情况下开启源站的Referer防盗链
+	EnableSource bool `json:"source_enabled"`
+
+	// 是否允许空的referer访问
+	NoRefer bool `json:"no_refer"`
+
+	// 用于防盗链token的生成
+	MacKey string `json:"mac_key"`
+
+	// 用于防盗链token的生成
+	MacKey2 string `json:"mac_key2"`
+
+	// 存储区域, 兼容保留
+	Zone string
+
+	// 存储区域
+	Region string
+}
+
+// ReferAntiLeechConfig 是用户存储空间的Refer防盗链配置
+type ReferAntiLeechConfig struct {
+	// 防盗链模式, 0 - 关闭Refer防盗链, 1 - 开启Referer白名单,2 - 开启Referer黑名单
+	Mode int
+
+	// 是否允许空的referer访问
+	AllowEmptyReferer bool
+
+	// Pattern 匹配HTTP Referer头, 当模式是1或者2的时候有效
+	// Mode为1的时候表示允许Referer符合该Pattern的HTTP请求访问
+	// Mode为2的时候表示禁止Referer符合该Pattern的HTTP请求访问
+	// 当前允许的匹配字符串格式分为三种:
+	// 一种为空主机头域名, 比如 foo.com; 一种是泛域名, 比如 *.bar.com;
+	// 一种是完全通配符, 即一个 *;
+	// 多个规则之间用;隔开, 比如: foo.com;*.bar.com;sub.foo.com;*.sub.bar.com
+	Pattern string
+
+	// 是否开启源站的防盗链, 默认为0, 只开启CDN防盗链, 当设置为1的时候
+	// 在源站支持的情况下开启源站的Referer防盗链
+	EnableSource bool
+}
+
+// SetMode 设置referer防盗链模式
+func (r *ReferAntiLeechConfig) SetMode(mode int) *ReferAntiLeechConfig {
+	if mode != 0 && mode != 1 && mode != 2 {
+		panic("Referer anti_leech_mode must be in [0, 1, 2]")
+	}
+	r.Mode = mode
+	return r
+}
+
+// SetEmptyReferer 设置是否允许空Referer访问
+func (r *ReferAntiLeechConfig) SetEmptyReferer(enable bool) *ReferAntiLeechConfig {
+	r.AllowEmptyReferer = enable
+	return r
+}
+
+// SetPattern 设置匹配Referer的模式
+func (r *ReferAntiLeechConfig) SetPattern(pattern string) *ReferAntiLeechConfig {
+	if pattern == "" {
+		panic("Empty pattern is not allowed")
+	}
+
+	r.Pattern = pattern
+	return r
+}
+
+// AddDomainPattern 添加pattern到Pattern字段
+// 假如Pattern值为"*.qiniu.com", 使用AddDomainPattern("*.baidu.com")后
+// r.Pattern的值为"*.qiniu.com;*.baidu.com"
+func (r *ReferAntiLeechConfig) AddDomainPattern(pattern string) *ReferAntiLeechConfig {
+	if strings.HasSuffix(r.Pattern, ";") {
+		r.Pattern = strings.TrimRight(r.Pattern, ";")
+	}
+	r.Pattern = strings.Join([]string{r.Pattern, pattern}, ";")
+	return r
+}
+
+// SetEnableSource 设置是否开启源站的防盗链
+func (r *ReferAntiLeechConfig) SetEnableSource(enable bool) *ReferAntiLeechConfig {
+	r.EnableSource = enable
+	return r
+}
+
+// AsQueryString 编码成query参数格式
+func (r *ReferAntiLeechConfig) AsQueryString() string {
+	params := make(url.Values, 4)
+	params.Add("mode", strconv.Itoa(r.Mode))
+	params.Add("pattern", r.Pattern)
+	if r.AllowEmptyReferer {
+		params.Add("norefer", "1")
+	} else {
+		params.Add("norefer", "0")
+	}
+	if r.EnableSource {
+		params.Add("source_enabled", "1")
+	} else {
+		params.Add("source_enabled", "0")
+	}
+	return params.Encode()
+}
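+
+// Editor's sketch: building a Referer whitelist with the chainable setters
+// above and applying it through SetReferAntiLeechMode (defined below).
+// The bucket name and pattern are placeholder assumptions.
+func exampleReferAntiLeech(m *BucketManager) error {
+	cfg := new(ReferAntiLeechConfig).
+		SetMode(1).                  // 1 = Referer whitelist
+		SetPattern("*.example.com"). // allow any subdomain of example.com
+		SetEmptyReferer(true).       // also allow requests without a Referer
+		SetEnableSource(false)
+	return m.SetReferAntiLeechMode("mybucket", cfg)
+}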
+
+// ProtectedOn 返回true or false
+// 如果开启了原图保护,返回true, 否则false
+func (b *BucketInfo) ProtectedOn() bool {
+	return b.Protected == 1
+}
+
+// IsPrivate 返回布尔值
+// 如果是私有空间, 返回 true, 否则返回false
+func (b *BucketInfo) IsPrivate() bool {
+	return b.Private == 1
+}
+
+// ImageSources 返回多个镜像回源地址的列表
+func (b *BucketInfo) ImageSources() (srcs []string) {
+	srcs = strings.Split(b.Source, ";")
+	return
+}
+
+// IndexPageOn 返回空间是否开启了根目录下的index.html
+func (b *BucketInfo) IndexPageOn() bool {
+	return b.NoIndexPage == 0
+}
+
+// Separators 返回分隔符列表
+func (b *BucketInfo) Separators() (ret []rune) {
+	for _, r := range b.Separator {
+		ret = append(ret, r)
+	}
+	return
+}
+
+// WhiteListSet 是否设置了防盗链白名单
+func (b *BucketInfo) WhiteListSet() bool {
+	return b.AntiLeechMode == 1
+}
+
+// BlackListSet 是否设置了防盗链黑名单
+func (b *BucketInfo) BlackListSet() bool {
+	return b.AntiLeechMode == 2
+}
+
+// TokenAntiLeechModeOn 返回是否开启了token签名防盗链
+func (b *BucketInfo) TokenAntiLeechModeOn() bool {
+	return b.TokenAntiLeechMode == 1
+}
+
+// GetBucketInfo 返回BucketInfo结构
+func (m *BucketManager) GetBucketInfo(bucketName string) (bucketInfo BucketInfo, err error) {
+	reqURL := fmt.Sprintf("%s/v2/bucketInfo?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucketName)
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &bucketInfo, "POST", reqURL, nil)
+	return
+}
+
+// BucketInfosInRegion 获取指定区域的该用户的所有bucketInfo信息
+func (m *BucketManager) BucketInfosInRegion(region RegionID, statistics bool) (bucketInfos []BucketSummary, err error) {
+	reqURL := fmt.Sprintf("%s/v2/bucketInfos?region=%s&fs=%t", getUcHost(m.Cfg.UseHTTPS), string(region), statistics)
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &bucketInfos, "POST", reqURL, nil)
+	return
+}
+
+// SetReferAntiLeechMode 配置存储空间referer防盗链模式
+func (m *BucketManager) SetReferAntiLeechMode(bucketName string, refererAntiLeechConfig *ReferAntiLeechConfig) (err error) {
+	reqURL := fmt.Sprintf("%s/referAntiLeech?bucket=%s&%s", getUcHost(m.Cfg.UseHTTPS), bucketName, refererAntiLeechConfig.AsQueryString())
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// BucketLifeCycleRule 定义了七牛存储空间生命周期方面的配置规则。
+// 比如存储空间中文件可以设置多少天后删除,多少天后转低频存储等等
+type BucketLifeCycleRule struct {
+	// 规则名称, 在设置的bucket中规则名称需要是唯一的
+	// 同时长度小于50, 不能为空
+	// 由字母,数字和下划线组成
+	Name string `json:"name"`
+
+	// 以该前缀开头的文件应用此规则
+	Prefix string `json:"prefix"`
+
+	// 指定存储空间内的文件多少天后删除
+	// 0 - 不删除
+	// > 0 表示多少天后删除
+	DeleteAfterDays int `json:"delete_after_days"`
+
+	// 在多少天后转低频存储
+	// 0 - 表示不转低频
+	// < 0 表示上传的文件立即使用低频存储
+	// > 0 表示多少天后转低频存储
+	ToLineAfterDays int `json:"to_line_after_days"`
+
+	// 指定文件上传多少天后转归档存储。
+	// 0 表示不转归档存储,
+	// < 0 表示上传的文件立即变归档存储
+	// > 0 表示多少天后转归档存储
+	ToArchiveAfterDays int `json:"to_archive_after_days"`
+
+	// 指定文件上传多少天后转深度归档存储。
+	// < 0 表示上传的文件立即变深度归档存储
+	// > 0 表示多少天后转深度归档存储
+	ToDeepArchiveAfterDays int `json:"to_deep_archive_after_days"`
+}
+
+// AddBucketLifeCycleRule 新增存储空间内文件的生命周期规则
+func (m *BucketManager) AddBucketLifeCycleRule(bucketName string, lifeCycleRule *BucketLifeCycleRule) (err error) {
+	params := make(map[string][]string)
+
+	// 没有检查参数的合法性,交给服务端检查
+	params["bucket"] = []string{bucketName}
+	params["name"] = []string{lifeCycleRule.Name}
+	params["prefix"] = []string{lifeCycleRule.Prefix}
+	params["delete_after_days"] = []string{strconv.Itoa(lifeCycleRule.DeleteAfterDays)}
+	params["to_ia_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToLineAfterDays)}
+	params["to_archive_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToArchiveAfterDays)}
+	params["to_deep_archive_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToDeepArchiveAfterDays)}
+
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/add"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// DelBucketLifeCycleRule 删除特定存储空间上设定的规则
+func (m *BucketManager) DelBucketLifeCycleRule(bucketName, ruleName string) (err error) {
+	params := make(map[string][]string)
+
+	params["bucket"] = []string{bucketName}
+	params["name"] = []string{ruleName}
+
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/delete"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// UpdateBucketLifeCycleRule 更新特定存储空间上的生命周期规则
+func (m *BucketManager) UpdateBucketLifeCycleRule(bucketName string, rule *BucketLifeCycleRule) (err error) {
+	params := make(map[string][]string)
+
+	params["bucket"] = []string{bucketName}
+	params["name"] = []string{rule.Name}
+	params["prefix"] = []string{rule.Prefix}
+	params["delete_after_days"] = []string{strconv.Itoa(rule.DeleteAfterDays)}
+	params["to_line_after_days"] = []string{strconv.Itoa(rule.ToLineAfterDays)}
+	params["to_archive_after_days"] = []string{strconv.Itoa(rule.ToArchiveAfterDays)}
+	params["to_deep_archive_after_days"] = []string{strconv.Itoa(rule.ToDeepArchiveAfterDays)}
+
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/update"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// GetBucketLifeCycleRule 获取指定空间上设置的生命周期规则
+func (m *BucketManager) GetBucketLifeCycleRule(bucketName string) (rules []BucketLifeCycleRule, err error) {
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/get?bucket=" + bucketName
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &rules, "GET", reqURL, nil)
+	return
+}
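+
+// Editor's sketch: expire objects under "tmp/" after 7 days and move them to
+// low-frequency storage after 1 day. The bucket name and values are
+// illustrative assumptions only.
+func exampleLifeCycleRule(m *BucketManager) error {
+	rule := BucketLifeCycleRule{
+		Name:            "tmp-cleanup",
+		Prefix:          "tmp/",
+		DeleteAfterDays: 7,
+		ToLineAfterDays: 1,
+	}
+	return m.AddBucketLifeCycleRule("mybucket", &rule)
+}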
+
+// BucketEventRule 定义了存储空间发生事件时候的通知规则
+// 比如调用了存储的"delete"删除接口删除文件, 这个是一个事件;
+// 当这个事件发生的时候, 我们要对哪些文件,做什么处理,是否要作回调,
+// 都可以通过这个结构体配置
+type BucketEventRule struct {
+	// 规则名字
+	Name string `json:"name"`
+
+	// 匹配文件前缀
+	Prefix string `json:"prefix"`
+
+	// 匹配文件后缀
+	Suffix string `json:"suffix"`
+
+	// 事件类型
+	// put,mkfile,delete,copy,move,append,disable,enable,deleteMarkerCreate
+	Event []string `json:"event"`
+
+	// 回调通知地址, 可以指定多个
+	CallbackURL []string `json:"callback_urls"`
+
+	// 用户的AccessKey, 可选, 设置的话会对通知请求用对应的ak、sk进行签名
+	AccessKey string `json:"access_key"`
+
+	// 回调通知的请求HOST, 可选
+	Host string `json:"host"`
+}
+
+// Params 返回一个hash结构
+func (r *BucketEventRule) Params(bucket string) map[string][]string {
+	params := make(map[string][]string)
+
+	params["bucket"] = []string{bucket}
+	params["name"] = []string{r.Name}
+	if r.Prefix != "" {
+		params["prefix"] = []string{r.Prefix}
+	}
+	if r.Suffix != "" {
+		params["suffix"] = []string{r.Suffix}
+	}
+	params["event"] = r.Event
+	params["callbackURL"] = r.CallbackURL
+	if r.AccessKey != "" {
+		params["access_key"] = []string{r.AccessKey}
+	}
+	if r.Host != "" {
+		params["host"] = []string{r.Host}
+	}
+	return params
+}
+
+// AddBucketEvent 增加存储空间事件通知规则
+func (m *BucketManager) AddBucketEvent(bucket string, rule *BucketEventRule) (err error) {
+	params := rule.Params(bucket)
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/add"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// DelBucketEvent 删除指定存储空间的通知事件规则
+func (m *BucketManager) DelBucketEvent(bucket, ruleName string) (err error) {
+	params := make(map[string][]string)
+	params["bucket"] = []string{bucket}
+	params["name"] = []string{ruleName}
+
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/delete"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// UpdateBucketEnvent 更新指定存储空间的事件通知规则
+func (m *BucketManager) UpdateBucketEnvent(bucket string, rule *BucketEventRule) (err error) {
+	params := rule.Params(bucket)
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/update"
+	err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, params)
+	return
+}
+
+// GetBucketEvent 获取指定存储空间的事件通知规则
+func (m *BucketManager) GetBucketEvent(bucket string) (rule []BucketEventRule, err error) {
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/get?bucket=" + bucket
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &rule, "GET", reqURL, nil)
+	return
+}
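+
+// Editor's sketch: notifying a webhook whenever objects are deleted. The
+// rule name, bucket and callback URL are placeholder assumptions.
+func exampleBucketEvent(m *BucketManager) error {
+	rule := BucketEventRule{
+		Name:        "notify-delete",
+		Event:       []string{"delete"},
+		CallbackURL: []string{"http://example.com/qiniu/callback"},
+	}
+	return m.AddBucketEvent("mybucket", &rule)
+}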
+
+// CorsRule 是关于存储的跨域规则
+// 最多允许设置10条跨域规则
+// 对于同一个域名如果设置了多条规则,那么按顺序使用第一条匹配的规则去生成返回值。
+// 对于简单跨域请求,只匹配 Origin;
+// 对于预检请求, 需要匹配 Origin、AllowedMethod、AllowedHeader;
+// 如果没有设置任何corsRules,那么默认允许所有的跨域请求
+// 参考: https://www.w3.org/TR/cors/
+type CorsRule struct {
+	// allowed_origin: 允许的域名。必填;支持通配符*;*表示全部匹配;只有第一个*生效;需要设置"Scheme";大小写敏感。例如
+	// 规则:http://*.abc.*.com 请求:"http://test.abc.test.com" 结果:不通过
+	// 规则:"http://abc.com" 请求:"https://abc.com"/"abc.com" 结果:不通过
+	// 规则:"abc.com" 请求:"http://abc.com" 结果:不通过
+	AllowedOrigin []string `json:"allowed_origin"`
+
+	// allowed_method: 允许的方法。必填;不支持通配符;大小写不敏感;
+	AllowedMethod []string `json:"allowed_method"`
+
+	// allowed_header: 允许的header。选填;支持通配符*,但只能是单独的*,表示允许全部header,其他*不生效;空则不允许任何header;大小写不敏感;
+	AllowedHeader []string `json:"allowed_header"`
+
+	// 暴露的header。选填;不支持通配符;X-Log, X-Reqid是默认会暴露的两个header;其他的header如果没有设置,则不会暴露;大小写不敏感;
+	ExposedHeader []string `json:"exposed_header"`
+
+	// max_age: 结果可以缓存的时间。选填;空则不缓存
+	MaxAge int64 `json:"max_age"`
+}
+
+// AddCorsRules 设置指定存储空间的跨域规则
+func (m *BucketManager) AddCorsRules(bucket string, corsRules []CorsRule) (err error) {
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/corsRules/set/" + bucket
+	err = m.Client.CredentialedCallWithJson(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil, corsRules)
+	return
+}
+
+// GetCorsRules 获取指定存储空间的跨域规则
+func (m *BucketManager) GetCorsRules(bucket string) (corsRules []CorsRule, err error) {
+	reqURL := getUcHost(m.Cfg.UseHTTPS) + "/corsRules/get/" + bucket
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &corsRules, "GET", reqURL, nil)
+	return
+}
+
+// BucketQuota 七牛存储空间的配额信息
+type BucketQuota struct {
+	// 如果HTTP请求没有发送该参数或者发送的参数是0,表示不更改当前配置
+	// 如果是-1, 表示取消限额
+	// 以下两个参数都适用于这个逻辑
+
+	// 空间存储量配额信息
+	Size int64
+
+	// 空间文件数配额信息
+	Count int64
+}
+
+// SetBucketQuota 设置存储空间的配额限制
+// 配额限制主要是两块, 空间存储量的限制和空间文件数限制
+func (m *BucketManager) SetBucketQuota(bucket string, size, count int64) (err error) {
+	reqHost, rErr := m.z0ApiHost()
+	if rErr != nil {
+		err = rErr
+		return
+	}
+	reqHost = strings.TrimRight(reqHost, "/")
+	reqURL := fmt.Sprintf("%s/setbucketquota/%s/size/%d/count/%d", reqHost, bucket, size, count)
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+	return
+}
+
+// GetBucketQuota 获取存储空间的配额信息
+func (m *BucketManager) GetBucketQuota(bucket string) (quota BucketQuota, err error) {
+	reqHost, rErr := m.z0ApiHost()
+	if rErr != nil {
+		err = rErr
+		return
+	}
+	reqHost = strings.TrimRight(reqHost, "/")
+	reqURL := reqHost + "/getbucketquota/" + bucket
+	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &quota, "POST", reqURL, nil)
+	return
+}
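+
+// Editor's sketch: capping a bucket at 1 GiB and 10000 objects, then reading
+// the quota back. The bucket name and limits are placeholder assumptions;
+// per the comments above, pass -1 to remove a limit.
+func exampleBucketQuota(m *BucketManager) (BucketQuota, error) {
+	if err := m.SetBucketQuota("mybucket", 1<<30, 10000); err != nil {
+		return BucketQuota{}, err
+	}
+	return m.GetBucketQuota("mybucket")
+}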
+
+// SetBucketAccessStyle 可以用来开启或关闭指定存储空间的原图保护
+// mode - 1 ==> 开启原图保护
+// mode - 0 ==> 关闭原图保护
+func (m *BucketManager) SetBucketAccessStyle(bucket string, mode int) error {
+	reqURL := fmt.Sprintf("%s/accessMode/%s/mode/%d", getUcHost(m.Cfg.UseHTTPS), bucket, mode)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// TurnOnBucketProtected 开启指定存储空间的原图保护
+func (m *BucketManager) TurnOnBucketProtected(bucket string) error {
+	return m.SetBucketAccessStyle(bucket, 1)
+}
+
+// TurnOffBucketProtected 关闭指定空间的原图保护
+func (m *BucketManager) TurnOffBucketProtected(bucket string) error {
+	return m.SetBucketAccessStyle(bucket, 0)
+}
+
+// SetBucketMaxAge 设置指定存储空间的MaxAge响应头
+// maxAge <= 0时,表示使用默认值31536000
+func (m *BucketManager) SetBucketMaxAge(bucket string, maxAge int64) error {
+	reqURL := fmt.Sprintf("%s/maxAge?bucket=%s&maxAge=%d", getUcHost(m.Cfg.UseHTTPS), bucket, maxAge)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// SetBucketAccessMode 设置指定空间的私有属性
+// mode - 1 表示设置空间为私有空间, 私有空间访问需要鉴权
+// mode - 0 表示设置空间为公开空间
+func (m *BucketManager) SetBucketAccessMode(bucket string, mode int) error {
+	reqURL := fmt.Sprintf("%s/private?bucket=%s&private=%d", getUcHost(m.Cfg.UseHTTPS), bucket, mode)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// MakeBucketPublic 设置空间为公有空间
+func (m *BucketManager) MakeBucketPublic(bucket string) error {
+	return m.SetBucketAccessMode(bucket, 0)
+}
+
+// MakeBucketPrivate 设置空间为私有空间
+func (m *BucketManager) MakeBucketPrivate(bucket string) error {
+	return m.SetBucketAccessMode(bucket, 1)
+}
+
+// TurnOnIndexPage 设置默认首页
+func (m *BucketManager) TurnOnIndexPage(bucket string) error {
+	return m.setIndexPage(bucket, 0)
+}
+
+// TurnOffIndexPage 关闭默认首页
+func (m *BucketManager) TurnOffIndexPage(bucket string) error {
+	return m.setIndexPage(bucket, 1)
+}
+
+func (m *BucketManager) setIndexPage(bucket string, noIndexPage int) error {
+	reqURL := fmt.Sprintf("%s/noIndexPage?bucket=%s&noIndexPage=%d", getUcHost(m.Cfg.UseHTTPS), bucket, noIndexPage)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
+}
+
+// BucketTagging 为 Bucket 设置标签
+type BucketTagging struct {
+	Tags []BucketTag `json:"Tags"`
+}
+
+type BucketTag struct {
+	Key   string `json:"Key"`
+	Value string `json:"Value"`
+}
+
+// SetTagging 设置 Bucket 标签
+// 该方法为覆盖所有 Bucket 上之前设置的标签,标签 Key 最大 64 字节,Value 最大 128 字节,均不能为空,且区分大小写
+// Key 不能以 kodo 为前缀,Key 和 Value 的字符只能为:字母,数字,空格,+,-,=,.,_,:,/,@,不能支持中文
+func (m *BucketManager) SetTagging(bucket string, tags map[string]string) error {
+	tagging := BucketTagging{Tags: make([]BucketTag, 0, len(tags))}
+	for key, value := range tags {
+		tagging.Tags = append(tagging.Tags, BucketTag{Key: key, Value: value})
+	}
+
+	reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket)
+	return m.Client.CredentialedCallWithJson(context.Background(), m.Mac, auth.TokenQiniu, nil, "PUT", reqURL, nil, &tagging)
+}
+
+// ClearTagging 清空 Bucket 标签
+func (m *BucketManager) ClearTagging(bucket string) error {
+	reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket)
+	return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "DELETE", reqURL, nil)
+}
+
+// GetTagging 获取 Bucket 标签
+func (m *BucketManager) GetTagging(bucket string) (tags map[string]string, err error) {
+	var tagging BucketTagging
+	reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket)
+	if err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &tagging, "GET", reqURL, nil); err != nil {
+		return
+	}
+	tags = make(map[string]string, len(tagging.Tags))
+	for _, tag := range tagging.Tags {
+		tags[tag.Key] = tag.Value
+	}
+	return
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/uploader_base.go b/vendor/github.com/qiniu/go-sdk/v7/storage/uploader_base.go
new file mode 100644
index 0000000..80212a5
--- /dev/null
+++ b/vendor/github.com/qiniu/go-sdk/v7/storage/uploader_base.go
@@ -0,0 +1,23 @@
+package storage
+
+func getUpHost(config *Config, ak, bucket string) (upHost string, err error) {
+	var zone *Zone
+	if config.Zone != nil {
+		zone = config.Zone
+	} else if zone, err = GetZone(ak, bucket); err != nil {
+		return
+	}
+
+	scheme := "http://"
+	if config.UseHTTPS {
+		scheme = "https://"
+	}
+
+	host := zone.SrcUpHosts[0]
+	if config.UseCdnDomains {
+		host = zone.CdnUpHosts[0]
+	}
+
+	upHost = scheme + host
+	return
+}
diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/util.go b/vendor/github.com/qiniu/go-sdk/v7/storage/util.go
new file mode 100644
index 0000000..6c8fb8c --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/util.go @@ -0,0 +1,22 @@ +package storage + +import ( + "time" +) + +// ParsePutTime 提供了将PutTime转换为 time.Time 的功能 +func ParsePutTime(putTime int64) (t time.Time) { + t = time.Unix(0, putTime*100) + return +} + +// IsContextExpired 检查分片上传的ctx是否过期,提前一天让它过期 +// 因为我们认为如果断点继续上传的话,最长需要1天时间 +func IsContextExpired(blkPut BlkputRet) bool { + if blkPut.Ctx == "" { + return false + } + target := time.Unix(blkPut.ExpiredAt, 0).AddDate(0, 0, -1) + now := time.Now() + return now.After(target) +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go b/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go new file mode 100644 index 0000000..213d39e --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go @@ -0,0 +1,50 @@ +package storage + +// Zone 是Region的别名 +// 兼容保留 +type Zone = Region + +// GetZone 用来根据ak和bucket来获取空间相关的机房信息 +// 新版本使用GetRegion, 这个函数用来保持兼容 +func GetZone(ak, bucket string) (zone *Zone, err error) { + return GetRegion(ak, bucket) +} + +var ( + // 华东机房 + // 兼容保留 + ZoneHuadong, _ = GetRegionByID(RIDHuadong) + + // 华北机房 + // 兼容保留 + ZoneHuabei, _ = GetRegionByID(RIDHuabei) + + // 华南机房 + // 兼容保留 + ZoneHuanan, _ = GetRegionByID(RIDHuanan) + + // 北美机房 + // 兼容保留 + ZoneBeimei, _ = GetRegionByID(RIDNorthAmerica) + + // 新加坡机房 + // 兼容保留 + ZoneXinjiapo, _ = GetRegionByID(RIDSingapore) + + // 雾存储华东机房 + // 兼容保留 + ZoneFogCnEast1, _ = GetRegionByID(RIDFogCnEast1) + + // 兼容保留 + Zone_z0 = ZoneHuadong + // 兼容保留 + Zone_z1 = ZoneHuabei + // 兼容保留 + Zone_z2 = ZoneHuanan + // 兼容保留 + Zone_na0 = ZoneBeimei + // 兼容保留 + Zone_as0 = ZoneXinjiapo + // 兼容保留 + Zone_fog_cn_east_1 = ZoneFogCnEast1 +) diff --git a/vendor/github.com/qiniu/go-sdk/v7/types.go b/vendor/github.com/qiniu/go-sdk/v7/types.go new file mode 100644 index 0000000..3654710 --- /dev/null +++ b/vendor/github.com/qiniu/go-sdk/v7/types.go @@ -0,0 +1,40 @@ +package api + +import ( + "io" + "io/ioutil" + "net/http" +) + +// BytesFromRequest 读取http.Request.Body的内容到slice中 +func BytesFromRequest(r *http.Request) (b []byte, err error) { + if r.ContentLength == 0 { + return + } + if r.ContentLength > 0 { + b = make([]byte, int(r.ContentLength)) + _, err = io.ReadFull(r.Body, b) + return + } + return ioutil.ReadAll(r.Body) +} + +func SeekerLen(s io.Seeker) (int64, error) { + + curOffset, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, io.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/.bumpversion.cfg b/vendor/github.com/tencentyun/cos-go-sdk-v5/.bumpversion.cfg new file mode 100644 index 0000000..d2bac6d --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/.bumpversion.cfg @@ -0,0 +1,7 @@ +[bumpversion] +commit = True +tag = True +current_version = 0.7.0 + +[bumpversion:file:cos.go] + diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/.gitignore b/vendor/github.com/tencentyun/cos-go-sdk-v5/.gitignore new file mode 100644 index 0000000..4dac9e1 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe 
+*.test +*.prof +dist/ +cover.html +cover.out +covprofile +coverage.html +example/CI/media_process/media_process diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/.travis.yml b/vendor/github.com/tencentyun/cos-go-sdk-v5/.travis.yml new file mode 100644 index 0000000..6d3e284 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/.travis.yml @@ -0,0 +1,32 @@ +language: go +go: +- master +sudo: false +before_install: +- go get -u github.com/mattn/goveralls +- go get -u github.com/stretchr/testify +install: +- go get +- go build +- go build github.com/mattn/goveralls +script: +- if [[ ! -n "$COS_SECRETID" ]]; then exit 0 + ; fi +- make test +- make ci-test +- go test -coverprofile=cover.out github.com/toranger/cos-go-sdk-v5 +- "${TRAVIS_HOME}/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out" +matrix: + allow_failures: + - go: 1.7 + - go: master +env: + global: + - secure: XXB/cFVnJcAzhOZ2/zplwjhhhireQQGGRbNscPgQ0kpUQCyPZ6oIHvJMafuP4TVTJHEdMiaDxm0HNvgARuopXVaQNmK2UZj6xw40Ud7OT7ZUnw88xkQkXOI5GwG8oz9LqxIXUSItHegKXRLW0e1PoBdjZNv6lxGFAtuOcl9ekAg/q2lGIIQFefz6NK7gCmGYULKe+4J15VFldoYNM0JesxxxArTvtv8+k+U53oUwy9dex6z5oIA1zGIeKLcOD2xXgbjid/Ett3t0B2w3GfJWoM9rGV0eHgveOAUGe5tQkMKvl5LK1hj+93ZmU0MAG7x7t9jYKrFPqU/eDNJRMb4Ro6L7lIXVEKaBUkLx28PnwFQ5D043GBVtQGqYNcldZXIfbyYEHQZlD/BWFOt5YqTpGg+7Wm4NC3Yffqsurzk54juT7FftzVy0A8MFkqO+c5RHrOSUlm01pWXkGLHgZhUP5gEZEuUaoluSQTZksmAUJZ7F8DxwpE4SYBqfN27PZ87rWDNyOqNv1w1trzwx2IfdHHA+vfCZ7UM5e85gxFWUO2tJCUai2q21v3gBrcAgBOb6BwVzbWAorM2zY20f0l21XxOWMakA+r4JJA3s3EmcczcQeeL6pkFIAh+qKdFEPuyQTjH1mGpPzYFNbWtvPXijQo5PqyGrKL8W1t3ovwXMXoE= + - secure: bep0PPD/oYW5zY0QpeeC+WgFIya5DNRVmR92MO+e5BdFlSJPhstoG8bRh91EeftzC/Hyd3PUEIglPqTgZPxwysqW/81plsU95wV3qJi9gPi7+ZtYXH4xZTnaqgZsTr7jsKSVoKHSu7XqCtbSytW8YMN9wRWzG19/9hX2Z79Q6yNy5l9856Oyj1E2IXDjdZLPsWDhnZ8Vvk1wAVy2fc2esqKzHAZwm8n9vee2yR8vz7GXUszzpKvn4R43eNzdlFEHCmN0ANmxLJZmnYDpZHHfNf4slts+0S6I7awFXppuXUDaJPBRCia4XoFeSw+01IW1Vi0kAwvGLhxjJCWc4M/4ZU0byXDT11tDFvWa19NmnbYiizWiXNVecn1oNWYJqIKe7TTAMAtHSXAPmLX0rXuXKzwM09W6yrLFufCxyix9IOnenEbe9WwSdBbhmeLF3Wu/uVGkDog/FsXJM75sk956vV9UKh9zF4B9/NR8szJMF7shEs0Fbru5UUWheqg4AadPl3dhAWuj2+6NANa1LpH3JVD3II9dlXeMmMvsSwDvrYUaX/S8tf6JwZG0zCJK0TYp05rjxH+NIzWaMUTY7+HwYqqK3pOW3San0SlZiMq8N7GSnKUZ7WRQXYSB4gXHrg+mWyeVC7XnqiRtCwVi+LtPMu+YUbg7dwVi0vtKjYZYIUY= + - secure: Ob28vrOuHMKNKEtChkWbsaVv2SwLhcxXMnvGe4XN+y3mFvdhYnwpt6NdgThF8OCZ0761tvTRmvALfiZnO0uORjTtoHKkVPrnVIxlCcode0NVJZNHGn2fqjemdLKCnSeX7hm+9zeLpCnIvC+Sp3iZ3t2AH4AzgFx6nirWO3HwT5l9rNL9Q1CfwlOpNJJ36r9JTHwQnXmOfOmszUNoZ3rtiFXJ8dCi+BgY0lsiIRSiDkAH7KAPf86REM+ww81AaXG4/RuYx1Vj5zQCtZN7XEOViSXEbqqb8SrIFOccDu5FV12djg+4QS7FSjLVGrdIUcn4oI6pS24Et3oXf8xFx6JLYyGGhgZ2BsyJEx5vLQvkTWnMTrwZVRtCQ+g6lMUQpJhL2rBrmVBUqBFb5IH69O7corQm53n5qLM8IiosAQLfbOtML/1PyEpKCG2aOx1377Fx2yzxXW3ucP1PBqCzli0oCM2T52LfiNvZTzkIU6XJebBnzkZXepzOIFSur86kxgvQFElw9ro2X6XXPKU5S25xVaUSvaN1kmqLSkToJ9S1rmDYXnJR4aH0R2GcLw+EkMHFJJoAjnRHxrB4/1vOJbzmfS+qy6ShRhUMSD8gk4YJ6Y7o9h7oekuWOEn+XGhl29U9T5OApzHfoPEGZwLnpHxAiKJtQtv/TNhBIOFCjigsF7U= +notifications: + email: + recipients: + - wjielai@tencent.com + - fysntian@tencent.com diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/CHANGELOG.md b/vendor/github.com/tencentyun/cos-go-sdk-v5/CHANGELOG.md new file mode 100644 index 0000000..090b6e0 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/CHANGELOG.md @@ -0,0 +1,628 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). + +## [v0.7.35](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.34...v0.7.35) - 2022-05-20 + +1、CI - 支持媒体信息任务,支持转封装加密,支持流抽取任务 +2、CI - 添加模板类型 +3、校验存储桶名称,内部域名dns打散,检查删除行为 +4、增加获取签名方法 +5、CI - 新增模板类型,工作流增删改查 + +### Merged + +- Cos v4 dev [`#175`](https://github.com/tencentyun/cos-go-sdk-v5/pull/175) + +### Commits + +- 新增模板类型 [`26c0bd6`](https://github.com/tencentyun/cos-go-sdk-v5/commit/26c0bd63c4f196bb829f34e374977b581aa82c90) +- 添加模板类型 [`11b0b48`](https://github.com/tencentyun/cos-go-sdk-v5/commit/11b0b48ad5d653455d5fa281913787c2d114bc05) +- 补充任务类型 [`84d9be2`](https://github.com/tencentyun/cos-go-sdk-v5/commit/84d9be24bbaf475d38cd98a096800fcd0cb0f009) +- 新增转码模板 [`2fb8865`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2fb8865ea0f70b61c1427862ae71252ba0437797) +- workflow [`3d28a02`](https://github.com/tencentyun/cos-go-sdk-v5/commit/3d28a0212353a5d0d6ff7e64e26e18fdfb4fd8a7) +- 工作流增删改查 [`c176883`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c176883a7412c29a649d9d1d39e9dca689babfe2) +- 修改demo中的bucket和appid [`0ce8af7`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0ce8af7f4057bb6d53077aee0a507dc960438cf7) +- 批量处理任务 [`7427269`](https://github.com/tencentyun/cos-go-sdk-v5/commit/7427269d2d82a1a7111331714a234bd4213dc191) +- ci 图片处理任务demo提交 [`cc07430`](https://github.com/tencentyun/cos-go-sdk-v5/commit/cc074303295e9e548c49ddd4403982b0db079db6) +- check bucketname, dns scatter [`77a1232`](https://github.com/tencentyun/cos-go-sdk-v5/commit/77a1232edd98478846b4c059fca7b7a0c3b3f2fa) +- add get_signature [`51699c6`](https://github.com/tencentyun/cos-go-sdk-v5/commit/51699c658095753a6875a8391986b55c8bde0876) +- 支持流抽取任务和增加3个color值 [`ee129a9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ee129a9d8764f1969cc9e110460e5a5fb60a2ce1) +- 添加工作流相关结构体 [`2b70cbc`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2b70cbc7903d1c74f6fdd50ebc34a6566600563e) +- 支持转封装加密和同步媒体信息struct复用异步任务的结构 [`6000c57`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6000c5725a7dace8eae163dd5b26668d2140a974) +- update ci ImageProcessResult, update ut [`55f2349`](https://github.com/tencentyun/cos-go-sdk-v5/commit/55f2349736555d4d8037087cd40b3e03e4a44382) +- 支持媒体信息任务 [`449f4e4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/449f4e42bb478ee1b1a609fa81f5f86911925e52) +- add opt header to GetObjectVersion [`85bc4ea`](https://github.com/tencentyun/cos-go-sdk-v5/commit/85bc4eaea499b16be69f8c5f07e5eaf4d6e9693c) +- check delete behavior [`07068b5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/07068b576885309547668d0ba23383aa9600175b) +- 删除调试信息 [`a60f7dd`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a60f7dd035755c20a29c11bd4254185e9961c9b8) +- update dns scatter [`c0527ad`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c0527adfff89c13144f5f53c3dd1396df60c9723) + +## [v0.7.34](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.33...v0.7.34) - 2022-03-01 + +1、新增 CI 云查毒接口 +2、图片和视频审核增加未成年人场景,更新图片审核参数 +3、完善工作流实例查询接口 +4、增加 CredentialIface 鉴权接口 +5、新增语音识别任务 +6、更新bucket inventory、增加Bucket Head可选头部参数。 + +### Merged + +- Cos v4 dev [`#167`](https://github.com/tencentyun/cos-go-sdk-v5/pull/167) + +### Commits + +- 完善任务接口 [`619bc6b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/619bc6be68d0e00530cbd4bf75e1debb2101b4ec) +- 完善工作流实例查询接口 
[`091e4df`](https://github.com/tencentyun/cos-go-sdk-v5/commit/091e4dffa3918e531444465a978d8c34e60eb9de) +- add:支持md5和进度 [`b4c4a90`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b4c4a906a608bcb3776adbcf3fda4fb60643ea8e) +- add credential [`72d8904`](https://github.com/tencentyun/cos-go-sdk-v5/commit/72d8904b4cff8746c867b127cc0cb16cba4b27c1) +- 新增语音识别任务 [`99c1f41`](https://github.com/tencentyun/cos-go-sdk-v5/commit/99c1f410afd665f6e24fcc42b10a9195e8670662) +- 云查毒接口 [`9c253bb`](https://github.com/tencentyun/cos-go-sdk-v5/commit/9c253bbe18f56928b32bdb00fa2829ce84c7a365) +- 图片审核增加大图压缩参数 [`8be801b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/8be801ba2a617ee56dfb722d8d2388ef8e3f2a36) +- 完善工作流实例查询接口 [`89bf394`](https://github.com/tencentyun/cos-go-sdk-v5/commit/89bf394df434e1c93ab8891b39996794176b8e13) +- add:支持万象域名下媒体接口 [`117b5d3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/117b5d3a28a6de35d1ffbc80a4e41bad5836bf9b) +- Updated CHANGELOG.md [`cb64742`](https://github.com/tencentyun/cos-go-sdk-v5/commit/cb64742158e726119a541f156faa3216fbdf5e85) +- Updated CHANGELOG.md [`f2dc7ef`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f2dc7efdce50c2114520941df06bdc49e15214cd) +- add opt header in bucket head [`33a7ea2`](https://github.com/tencentyun/cos-go-sdk-v5/commit/33a7ea2683e7608d794011143ad1a01943760a66) +- update download [`0f3c6f4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0f3c6f4ac1709b11823356704c3c6f0225d754aa) +- update multicopy [`1a1a581`](https://github.com/tencentyun/cos-go-sdk-v5/commit/1a1a5810b308feec05fc92ef67d5a298149efef1) +- update fetch demo [`9cc16ad`](https://github.com/tencentyun/cos-go-sdk-v5/commit/9cc16ad671b723472287ce003acee48b34228e00) +- 网页审核dataid参数 [`b408223`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b408223459252423a7385a447bb5454e60478897) +- update bucket inventory [`18ecdbd`](https://github.com/tencentyun/cos-go-sdk-v5/commit/18ecdbde98e03b80826d7f10ba8d3b783dbdd845) +- update ut [`108941e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/108941e6ae203e6e138d38009012866adad429f1) +- 图片和视频审核增加未成年人场景 [`df7e3f9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/df7e3f9740ed5e387038b0b27f5378b16c0ab712) +- 图片审核兼容terrorism拼法 [`8fcaab6`](https://github.com/tencentyun/cos-go-sdk-v5/commit/8fcaab682ef9645d6de255d154879740d056e8eb) + +## [v0.7.33](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.32...v0.7.33) - 2021-12-07 + +1、update the sign +2、update demo +3、Optimization phase II + +### Merged + +- Cos v4 dev [`#164`](https://github.com/tencentyun/cos-go-sdk-v5/pull/164) +- update presignurl [`#161`](https://github.com/tencentyun/cos-go-sdk-v5/pull/161) + +### Commits + +- update auth [`b5482a8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b5482a8a4b3a87aa136e3be493b0b7b97f3ffcc8) +- 内容审核网页审核接口 [`47bfeab`](https://github.com/tencentyun/cos-go-sdk-v5/commit/47bfeabd2ec0c88af9a1acb09eb116ebd3b4ce73) +- 内容审核RequestId参数 [`1900e2c`](https://github.com/tencentyun/cos-go-sdk-v5/commit/1900e2cc80447edbb3ad0e8f01993529fa23d413) +- update demo [`803cdd8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/803cdd823383fa19d990c04f03152b7c580eaa06) +- update demo [`3d30e27`](https://github.com/tencentyun/cos-go-sdk-v5/commit/3d30e2772f174f51354140cf3962e87274f0657d) +- update PresignedURL && add batchGet demo [`4c06894`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4c068949a6a9b71c8e8b282be606e62bc1a8533f) +- 图片批量审核接口 
[`55968d8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/55968d88111aa86ebb189bcd0c8cc0d2a2bc9783) +- Updated CHANGELOG.md [`cdb5d3c`](https://github.com/tencentyun/cos-go-sdk-v5/commit/cdb5d3c23d38068c896b7bcbb79021d8d4113a5a) +- 音频审核查询结果分片返回多个 [`4d05757`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4d05757ff4ee42c63191fa6b782f6bc1757b8eb3) +- add params check [`03ccac5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/03ccac5f6d5715e0814d7c825bfd9c6879d52018) +- 内容审核DataId参数 [`5eabaa9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/5eabaa9b4148e072da7223f2420bb9cdc1373b85) + +## [v0.7.32](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.31...v0.7.32) - 2021-10-28 + +1、add ci interface +2、add ut + +### Merged + +- add ut [`#160`](https://github.com/tencentyun/cos-go-sdk-v5/pull/160) +- add CI 媒体处理接口 [`#159`](https://github.com/tencentyun/cos-go-sdk-v5/pull/159) +- Cos v4 dev [`#158`](https://github.com/tencentyun/cos-go-sdk-v5/pull/158) +- update auth signed header [`#157`](https://github.com/tencentyun/cos-go-sdk-v5/pull/157) +- Cos v4 dev [`#156`](https://github.com/tencentyun/cos-go-sdk-v5/pull/156) +- update PresignedURL [`#155`](https://github.com/tencentyun/cos-go-sdk-v5/pull/155) +- Cos v4 dev [`#154`](https://github.com/tencentyun/cos-go-sdk-v5/pull/154) +- Cos v4 dev [`#153`](https://github.com/tencentyun/cos-go-sdk-v5/pull/153) +- Cos v4 dev [`#152`](https://github.com/tencentyun/cos-go-sdk-v5/pull/152) +- Cos v4 dev [`#151`](https://github.com/tencentyun/cos-go-sdk-v5/pull/151) +- add:增加截图任务参数 [`#150`](https://github.com/tencentyun/cos-go-sdk-v5/pull/150) + +### Commits + +- add fetch task api && add cvm role [`d2e9d91`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d2e9d919bdd6dba423ac39f0aa2d882ff85a4963) +- 内容审核六期接口 [`87f4943`](https://github.com/tencentyun/cos-go-sdk-v5/commit/87f49432d0514158982bdccda4a15c6504c98701) +- add retry options [`b0532f5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b0532f56c62b132897e0a3b333cbc43f1f5393da) +- add:解析回调body [`52bf0c7`](https://github.com/tencentyun/cos-go-sdk-v5/commit/52bf0c72b54bfad999edd9a542c9b44af61a2b58) +- add:支持多任务接口 [`84a85a1`](https://github.com/tencentyun/cos-go-sdk-v5/commit/84a85a159760405704c61e1085e790f378a3bf24) +- add:支持雪碧图任务 [`1f98f78`](https://github.com/tencentyun/cos-go-sdk-v5/commit/1f98f7857cd2cc5b70dbb1ae60628f4310c0c5c3) +- ci 分段任务struct demo提交 [`6b85e7b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6b85e7b9e72f223fab3eb9d5b1eea4b61bf7740c) +- add:支持动图任务 [`96b7f6d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/96b7f6d79015704a4cae2bcf153eb601b290cf0c) +- fix:调整指针数组为实例十足 [`54fa46a`](https://github.com/tencentyun/cos-go-sdk-v5/commit/54fa46a83631caba6570d9276fe287ea4420d6bf) +- update ci doc [`48d7d86`](https://github.com/tencentyun/cos-go-sdk-v5/commit/48d7d868fe7dec6040a597a07ca3babef41e83b9) +- update [`d7b154b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d7b154bab1506ffa2b7ddbd834b64bfbc082a6a2) +- add:xml可见性 [`0263d6a`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0263d6a6394a80c87a7c1c1507e048bdeffb15d5) +- update demo [`e9c0af3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e9c0af3ad1eb0582e9517c4f80af922e0e181469) +- update demo [`6372b2e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6372b2ef3cbc66be72e77e222e3d49c0d3ced254) +- fix:截图时输出文件是多个 [`134713e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/134713eac32327ff6b09aaa57f95ec53fca28221) + +## 
+## [v0.7.31](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.30...v0.7.31) - 2021-09-07
+
+update the CI APIs and the advanced transfer APIs
+
+### Merged
+
+- update the CI content auditing APIs [`#149`](https://github.com/tencentyun/cos-go-sdk-v5/pull/149)
+- update upload/download opt [`#148`](https://github.com/tencentyun/cos-go-sdk-v5/pull/148)
+- Cos v4 dev [`#146`](https://github.com/tencentyun/cos-go-sdk-v5/pull/146)
+- Cos v4 dev [`#145`](https://github.com/tencentyun/cos-go-sdk-v5/pull/145)
+
+### Commits
+
+- update download retry && bucket domain [`a63f617`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a63f6174b609f6b593cc274566b50aa5174705e6)
+- add: support concatenation (splicing) jobs [`ee4f335`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ee4f335957f295497adbcf81a8e34bf38ba5e106)
+- add: formatting [`81d6170`](https://github.com/tencentyun/cos-go-sdk-v5/commit/81d6170a01b6bb50140cd738e7b91e812c2e641e)
+- Updated CHANGELOG.md [`4c0eb2d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4c0eb2d7b78c5d58c7a9495111fefb8bf854b31a)
+- update ci_media [`17cf1d4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/17cf1d43003ba5998f23381437df03fa98a27f17)
+- update upload/download [`fd0f5ed`](https://github.com/tencentyun/cos-go-sdk-v5/commit/fd0f5ed75aa97ed7719bbfd0da5913a825621094)
+
+## [v0.7.30](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.29...v0.7.30) - 2021-08-25
+
+### Merged
+
+- Cos v4 dev [`#144`](https://github.com/tencentyun/cos-go-sdk-v5/pull/144)
+
+### Commits
+
+- add append && add retry [`6d06f34`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6d06f3452d0a8b6f1f8f719edf2d80e0fcce4823)
+- add put with transport [`35642e5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/35642e50731639c982bf3302e1088fc7963ae9bb)
+- Updated CHANGELOG.md [`a505960`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a505960fe4cbb12bbf081315a73f97b880deeb1e)
+- update append [`df29a44`](https://github.com/tencentyun/cos-go-sdk-v5/commit/df29a448ac33860036422075a1e64d0bf3120c45)
+
+## [v0.7.29](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.28...v0.7.29) - 2021-07-27
+
+update version
+
+### Commits
+
+- Updated CHANGELOG.md [`4d84465`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4d84465313582d4cd13c6138f8a510c26c419678)
+- update version [`8d03ad4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/8d03ad45904c733ba1b04926672e8a1cffc98b44)
+
+## [v0.7.28](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.27...v0.7.28) - 2021-07-27
+
+update context and auth expire time
+
+### Merged
+
+- update context [`#143`](https://github.com/tencentyun/cos-go-sdk-v5/pull/143)
+- update ctx [`#141`](https://github.com/tencentyun/cos-go-sdk-v5/pull/141)
+
+### Commits
+
+- update CHANGELOG.md [`eb5f276`](https://github.com/tencentyun/cos-go-sdk-v5/commit/eb5f27672a70f8a8621d763a19a9b24427007480)
+- Updated CHANGELOG.md [`100cdb8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/100cdb8ab8576d3a56c20b7e4c27993275adfdd3)
+- Updated CHANGELOG.md [`925081a`](https://github.com/tencentyun/cos-go-sdk-v5/commit/925081ac2523e9e92ddd17c27fc5ab965b8e3648)
+- update auth [`9795208`](https://github.com/tencentyun/cos-go-sdk-v5/commit/979520893c8ec4a1ce581261f7793849ff3afc5d)
+
+## [v0.7.27](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.26...v0.7.27) - 2021-06-25
+
+fix the go.mod
+
+### Commits
+
+- Updated CHANGELOG.md [`43e7b7b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/43e7b7bec2538083b9e48936ce8b1b4a45ba08a6)
+- update to v0.7.27 [`152b063`](https://github.com/tencentyun/cos-go-sdk-v5/commit/152b0634ca147d5695644361eaa966e81e875e73)
+
+## [v0.7.26](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.25...v0.7.26) - 2021-06-25
+
+1. add cse-kms
+2. add get-object-url
+3. add unit tests
+
+### Merged
+
+- update go.mod [`#139`](https://github.com/tencentyun/cos-go-sdk-v5/pull/139)
+- add header to restore [`#137`](https://github.com/tencentyun/cos-go-sdk-v5/pull/137)
+- update bucket get [`#136`](https://github.com/tencentyun/cos-go-sdk-v5/pull/136)
+- add get object url [`#135`](https://github.com/tencentyun/cos-go-sdk-v5/pull/135)
+- Complete unit tests in ci_media.go and fix several issues in ci_media.go [`#134`](https://github.com/tencentyun/cos-go-sdk-v5/pull/134)
+- update test [`#131`](https://github.com/tencentyun/cos-go-sdk-v5/pull/131)
+- update bucket lifecycle [`#128`](https://github.com/tencentyun/cos-go-sdk-v5/pull/128)
+- add cse-kms [`#127`](https://github.com/tencentyun/cos-go-sdk-v5/pull/127)
+
+### Commits
+
+- add ces-kms [`7134982`](https://github.com/tencentyun/cos-go-sdk-v5/commit/7134982d66a52575ec5133d7a01d718175baf719)
+- complete the unit tests in object.go, ci.go and ci_doc.go [`205ee11`](https://github.com/tencentyun/cos-go-sdk-v5/commit/205ee119e9c7f7592e95286915eed785c443d589)
+- add Crypto UserAgent and versionid [`6ddd327`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6ddd32755c16dcbcf89cecb785b23d32d0199ea5)
+- Complete unit tests in ci_media.go and fix several issues in ci_media.go. [`d1c9ff5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d1c9ff5808b601b7a918b152c8fba7ac2d5976c4)
+- prepare crypto [`6097da8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6097da89694493508c7dba2e6ab5a59c5ab66fa9)
+- cse-kms done [`05379cf`](https://github.com/tencentyun/cos-go-sdk-v5/commit/05379cf3101c3f44b59dbbf1bcc14517a8c2f922)
+- update go.sum [`15022ae`](https://github.com/tencentyun/cos-go-sdk-v5/commit/15022aea2ec57f3a68f4a0d6f72dcfe1d85c2fd2)
+- Updated CHANGELOG.md [`efd7866`](https://github.com/tencentyun/cos-go-sdk-v5/commit/efd7866989a308b05f8834e7cfc1682fceb8b088)
+- Updated CHANGELOG.md [`8e62f33`](https://github.com/tencentyun/cos-go-sdk-v5/commit/8e62f335d477bf2d83db9f73236627b8362fbd84)
+- Updated CHANGELOG.md [`ba44de1`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ba44de1b0fe5fe66b89314a90ca3a7277d8345c9)
+- update crypto demo [`70b911b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/70b911ba7ef0775d3ca6d3e966cc1c2bea52fd0c)
+- update crypto test [`0edf665`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0edf665bf13714f57397b920ac613221a9da4209)
+- update ces-kms [`3094fa7`](https://github.com/tencentyun/cos-go-sdk-v5/commit/3094fa72e8d1985528cfc0c8835ac0f01e15b2c7)
+- Fix ci_media.go and example/CI/media_process/media_process.go [`ef23bab`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ef23bab44479fc5a1960b9357b64e556402527d8)
+- update crypto wrap meta [`06f101d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/06f101d9e3de4cf038c2a50fc87c4f2830e397fe)
+- update crypto sample [`d7608ad`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d7608ad3bc2e46f2213e4c2faaf466cd802cf24a)
+- update [`e89b7b0`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e89b7b088ab69340ef04cd3ab4bb53ad7b4dc40c)
+
+## [v0.7.25](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.24...v0.7.25) - 2021-05-10
+
+1. update ci
+2. update demo
+3. add download checkpoint
+
+### Merged
+
+- fix demo [`#126`](https://github.com/tencentyun/cos-go-sdk-v5/pull/126)
+- add ci && download
checkpoint [`#124`](https://github.com/tencentyun/cos-go-sdk-v5/pull/124) +- update bucket lifecycle,presignedURL,error format [`#120`](https://github.com/tencentyun/cos-go-sdk-v5/pull/120) +- Cos dev v5 [`#117`](https://github.com/tencentyun/cos-go-sdk-v5/pull/117) + +### Commits + +- add ci guetzli && test [`caee386`](https://github.com/tencentyun/cos-go-sdk-v5/commit/caee386405df2603c860bf08f6e39bc0d76663fe) +- add CI QRCode && Update CI [`e2bf821`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e2bf82118033b3d0317afdde8c7f0da6e767e003) +- add multidownload [`f54cbf1`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f54cbf13a574974f5916c7ac3af46a41ac7fd4e8) +- add example [`4cc93a9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4cc93a9ebd3649100363d59c40934bdf56250eb4) +- add AudioAuditing [`1b22189`](https://github.com/tencentyun/cos-go-sdk-v5/commit/1b221890652deff58fb0d7e31cf679ce3de412f8) +- download checkpoint [`4d15821`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4d158217c8b79af01b00916b1715f0c982862373) +- update download and put bucket [`da7dafc`](https://github.com/tencentyun/cos-go-sdk-v5/commit/da7dafccbdb6ae9f9809c4d0243b70976a437cdb) +- update multidownload [`2a64ad4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2a64ad473b82f4b621fa35ddc5655297c0c73955) +- deliver init header to complete in upload [`f398b20`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f398b20eeeb6c847c11a863a39e81c89bf285645) +- add directory demo [`4af950f`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4af950f313b3a12aeed3c928efad4bf1dac8c03e) +- Updated CHANGELOG.md [`ab8e9e6`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ab8e9e6980206a19433a7b133c68eb77e9278853) + +## [v0.7.24](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.23...v0.7.24) - 2021-03-27 + +update PutFromFile + +### Merged + +- Cos v4 dev [`#116`](https://github.com/tencentyun/cos-go-sdk-v5/pull/116) + +### Commits + +- add req close options & update upload simple upload threshold [`f2a0dc3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f2a0dc3aad8f8e684d44e5d350670cc20a9ad329) +- update PutFromFile [`5672500`](https://github.com/tencentyun/cos-go-sdk-v5/commit/5672500b2d0570e06166d21285f384567d03c27d) +- Updated CHANGELOG.md [`8a9f014`](https://github.com/tencentyun/cos-go-sdk-v5/commit/8a9f014dd5890ca47f9ff8fa9c384c1d8067cee1) +- PutFromFile add retry [`7a8d337`](https://github.com/tencentyun/cos-go-sdk-v5/commit/7a8d337d5c6aee90294b2a86937df92287ca34e2) +- update upload [`228fb17`](https://github.com/tencentyun/cos-go-sdk-v5/commit/228fb17936e5a3618a631e16a36fdc7769821228) + +## [v0.7.23](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.22...v0.7.23) - 2021-03-23 + +a stable release + +### Merged + +- latest 0.7.23 stable [`#113`](https://github.com/tencentyun/cos-go-sdk-v5/pull/113) + +### Commits + +- update latest 0.7.23 stable [`17c220d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/17c220d588c93afcdfce86154245daf44b196e0c) +- Updated CHANGELOG.md [`f42ee97`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f42ee9765b9267fc64345d4bb5ca389ed0528c07) + +## [v0.7.22](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.21...v0.7.22) - 2021-03-12 + +update crc + +### Merged + +- Cos v4 dev [`#111`](https://github.com/tencentyun/cos-go-sdk-v5/pull/111) +- update [`#109`](https://github.com/tencentyun/cos-go-sdk-v5/pull/109) + +### Commits + +- Updated CHANGELOG.md 
[`d58178d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d58178d869994363c70a4e1b846b4f8ff87ccb84)
+- update crc [`23ad6e2`](https://github.com/tencentyun/cos-go-sdk-v5/commit/23ad6e2c72a815f790345446045db132abdec0dd)
+
+## [v0.7.21](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.20...v0.7.21) - 2021-03-08
+
+add data integrity verification to the advanced transfer APIs
+
+### Merged
+
+- Cos v4 dev [`#108`](https://github.com/tencentyun/cos-go-sdk-v5/pull/108)
+- Cos v4 dev [`#107`](https://github.com/tencentyun/cos-go-sdk-v5/pull/107)
+- Cos v4 dev [`#106`](https://github.com/tencentyun/cos-go-sdk-v5/pull/106)
+
+### Commits
+
+- add: support media processing job APIs [`917abd1`](https://github.com/tencentyun/cos-go-sdk-v5/commit/917abd14626ac6b1dd08c21d46b9a9988f537b1a)
+- add: add links in the README [`d795d20`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d795d20dca35ef730c8b58ba8a1e7e27fd9810d3)
+- update ci [`33b02c0`](https://github.com/tencentyun/cos-go-sdk-v5/commit/33b02c0be26631712f47c410e6d1f32f6fae477b)
+- update ci put & select [`62fb57e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/62fb57e373a9b86dafbd356820c0e96b20da9ba2)
+- add upload verification [`2bfb93f`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2bfb93f7d05ffdd4db0a21cbec410f97f2d51cb6)
+- Updated CHANGELOG.md [`e0e66ae`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e0e66ae920c8611459d505d9f847e16e62cb666f)
+- add: add links in the README [`a060fa0`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a060fa03a93c6b7bb2e303e7c270fa0df8895e24)
+- update [`45e6cb6`](https://github.com/tencentyun/cos-go-sdk-v5/commit/45e6cb65c3d9758743bba26342ec02ad38f75d22)
+- update [`685c58a`](https://github.com/tencentyun/cos-go-sdk-v5/commit/685c58af29276c23ff69aee377b03ed31c7eb856)
+- update ci [`deaede4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/deaede440c412277e93e9c0a891089ddb92381f3)
+
+## [v0.7.20](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.19...v0.7.20) - 2021-02-25
+
+fix multicopy and multiupload send on closed channel error
+
+### Merged
+
+- fix multicopy and multiupload send on closed channel panic [`#104`](https://github.com/tencentyun/cos-go-sdk-v5/pull/104)
+- update DividePart to 64M [`#103`](https://github.com/tencentyun/cos-go-sdk-v5/pull/103)
+- update test [`#102`](https://github.com/tencentyun/cos-go-sdk-v5/pull/102)
+
+### Commits
+
+- Updated CHANGELOG.md [`eb667d9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/eb667d9ed9fae2a338bee4fffc84381d00dec897)
+
+## [v0.7.19](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.18...v0.7.19) - 2021-02-01
+
+Add Get Object Optional Header && Add Test
+
+### Merged
+
+- add get object optional and test [`#101`](https://github.com/tencentyun/cos-go-sdk-v5/pull/101)
+
+### Commits
+
+- Updated CHANGELOG.md [`d2273eb`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d2273eb6855df9c5db147ae4e1477eefc2490403)
+
+## [v0.7.18](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.17...v0.7.18) - 2021-01-11
+
+### Merged
+
+- update upload [`#100`](https://github.com/tencentyun/cos-go-sdk-v5/pull/100)
+
+### Commits
+
+- Updated CHANGELOG.md [`5172396`](https://github.com/tencentyun/cos-go-sdk-v5/commit/51723968313c10a106ecf7fdb5227c41247ec066)
+
+## [v0.7.17](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.16...v0.7.17) - 2020-12-29
+
+add multicopy
+
+### Merged
+
+- add MultiCopy [`#99`](https://github.com/tencentyun/cos-go-sdk-v5/pull/99)
+
+### Commits
+
+- Updated CHANGELOG.md [`91fc87b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/91fc87ba8af042adb0d0bf147f8fe6aa49057976)
+
+## [v0.7.16](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.15...v0.7.16) - 2020-12-24
+
+add bucket accelerate && update upload progress
+
+### Merged
+
+- Cos v4 dev [`#98`](https://github.com/tencentyun/cos-go-sdk-v5/pull/98)
+
+### Commits
+
+- Add Bucket Accelerate [`c07e497`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c07e49771c809fab640ac9f2c31d776de9dea23d)
+- update upload progress && single object length [`72e7751`](https://github.com/tencentyun/cos-go-sdk-v5/commit/72e77516044f833f60577906800a638ebb31dc83)
+- Updated CHANGELOG.md [`5057561`](https://github.com/tencentyun/cos-go-sdk-v5/commit/50575619064fbd54e69745b1884756e6f6222a99)
+- update version [`6da3d40`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6da3d4094cd8ca8e6840dedcb8b540c14e1f4c93)
+
+## [v0.7.15](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.14...v0.7.15) - 2020-12-11
+
+update ci & ci document
+
+### Merged
+
+- update ci [`#96`](https://github.com/tencentyun/cos-go-sdk-v5/pull/96)
+
+### Commits
+
+- Updated CHANGELOG.md [`b8afb3f`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b8afb3f85050cee4884a46c8ed49d26bf76d10a4)
+
+## [v0.7.14](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.13...v0.7.14) - 2020-12-09
+
+fix bucket lifecycle
+
+### Merged
+
+- update version [`#95`](https://github.com/tencentyun/cos-go-sdk-v5/pull/95)
+- fix bucket lifecycle [`#94`](https://github.com/tencentyun/cos-go-sdk-v5/pull/94)
+
+### Commits
+
+- Updated CHANGELOG.md [`e3a89ee`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e3a89ee58b4f524c7ad5f2b1f0bdc688a2e39c32)
+
+## [v0.7.13](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.12...v0.7.13) - 2020-12-08
+
+add ci document && add progress
+
+### Merged
+
+- Cos v4 dev [`#93`](https://github.com/tencentyun/cos-go-sdk-v5/pull/93)
+
+### Commits
+
+- add ci doc [`dad5b1f`](https://github.com/tencentyun/cos-go-sdk-v5/commit/dad5b1f3fbbf30958d3d8ae930b06abcaf8db5ac)
+- add progress [`2afc5e1`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2afc5e192cd9bc8d6630fa929e10028ec79bde8e)
+- Updated CHANGELOG.md [`e39f3e3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e39f3e3585e0478abe50b33962e1e3981a2d432c)
+- update test [`4899c22`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4899c226f70ccc1bb4ac8489a2b8ff000003bc97)
+- add auto changelog workflow [`ca3ea38`](https://github.com/tencentyun/cos-go-sdk-v5/commit/ca3ea38770afcd11e50570a835090c68a377157f)
+
+## [v0.7.12](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.11...v0.7.12) - 2020-11-25
+
+update presignedurl && copy
+
+### Merged
+
+- fix ci_test [`#89`](https://github.com/tencentyun/cos-go-sdk-v5/pull/89)
+- update version [`#92`](https://github.com/tencentyun/cos-go-sdk-v5/pull/92)
+- update presignedurl && copy [`#91`](https://github.com/tencentyun/cos-go-sdk-v5/pull/91)
+
+## [v0.7.11](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.10...v0.7.11) - 2020-11-15
+
+resumable (checkpoint) transfer, SELECT, CI content auditing, CI cloud-side processing, per-connection rate limiting
+
+### Merged
+
+- Cos v4 dev [`#90`](https://github.com/tencentyun/cos-go-sdk-v5/pull/90)
+
+### Commits
+
+- add select, and x-cos-traffic-limit [`e6f823c`](https://github.com/tencentyun/cos-go-sdk-v5/commit/e6f823c7c1ee66fe68de4aafd45b009dee4b1ac2)
+- add ListMultiUploads && resumable transfer [`64d31f3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/64d31f318a223866ac16c64bbe9effcd168b696d)
+- CI content auditing [`695c446`](https://github.com/tencentyun/cos-go-sdk-v5/commit/695c4466f5cec17acd5143777d4eae90f389f119)
+- add checkpoint multi upload [`287669a`](https://github.com/tencentyun/cos-go-sdk-v5/commit/287669a677af36153d645aabd4b296114aea75fe)
+- add ci post [`dd7c41c`](https://github.com/tencentyun/cos-go-sdk-v5/commit/dd7c41ca0824b607633e6a7bd0d3a838d52a91fd)
+- add CI image auditing [`98eab28`](https://github.com/tencentyun/cos-go-sdk-v5/commit/98eab2886c628a3fbbbf581aeea56759ff38700c)
+- fix list uploads [`5e69c19`](https://github.com/tencentyun/cos-go-sdk-v5/commit/5e69c19d3450d06c3da270ca12e43647432d0ca4)
+- add decodeURIComponent [`f6c91c9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f6c91c92d6e46869c8f7acf000eccce2d351051b)
+- add checkpoint multi upload: update [`52c110b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/52c110b7ede92af97e3685d4afd73a128ddd443b)
+
+## [v0.7.10](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.9...v0.7.10) - 2020-09-29
+
+add delete with versionid & fix MultiUpload when filesize equals 0 & Bucket/Object ACL transform
+
+### Merged
+
+- ACL conversion [`#87`](https://github.com/tencentyun/cos-go-sdk-v5/pull/87)
+- fix bucket encryption & test [`#86`](https://github.com/tencentyun/cos-go-sdk-v5/pull/86)
+
+### Commits
+
+- fix MultiUpload when filesize=0 [`cb662cd`](https://github.com/tencentyun/cos-go-sdk-v5/commit/cb662cdad5225cf92be335911d1805a29e445d14)
+- versioned (multi-version) delete [`0e9536d`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0e9536d989b8e47b71fa02d12cb75d8ffda8b5fc)
+- update travis.yml [`b0a399e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b0a399e92dd143fb55b1ed173497644e51450835)
+- update version [`5804e86`](https://github.com/tencentyun/cos-go-sdk-v5/commit/5804e86747f587402b962dcb680ebc0d7c04035d)
+
+## [v0.7.9](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.8...v0.7.9) - 2020-09-14
+
+add bucket intelligenttiering
+
+### Merged
+
+- add bucket intelligenttiering [`#85`](https://github.com/tencentyun/cos-go-sdk-v5/pull/85)
+
+## [v0.7.8](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.7...v0.7.8) - 2020-08-31
+
+fix x-cos-copy-source URL encoding
+
+### Merged
+
+- fix x-cos-copy-source URL encoding [`#83`](https://github.com/tencentyun/cos-go-sdk-v5/pull/83)
+- Common dev [`#82`](https://github.com/tencentyun/cos-go-sdk-v5/pull/82)
+
+### Commits
+
+- add host to signature [`f9f6178`](https://github.com/tencentyun/cos-go-sdk-v5/commit/f9f617878dad23f915ecea978f065aa70486843c)
+
+## [v0.7.7](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.6...v0.7.7) - 2020-06-04
+
+add object tagging && bucket origin && add stsv3 demo
+
+### Merged
+
+- add object tagging && bucket origin && add stsv3 demo [`#80`](https://github.com/tencentyun/cos-go-sdk-v5/pull/80)
+
+## [v0.7.6](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.5...v0.7.6) - 2020-05-20
+
+Add GetBucketObjectVersions
+
+### Merged
+
+- add GetBucketObjectVersions & update tips when failed [`#78`](https://github.com/tencentyun/cos-go-sdk-v5/pull/78)
+
+### Commits
+
+- update version [`bcc1ed2`](https://github.com/tencentyun/cos-go-sdk-v5/commit/bcc1ed2b8317cea8a32ab193ffad0a6482d63560)
+
+## [v0.7.5](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.4...v0.7.5) - 2020-05-14
+
+add HeaderOptions to Object.Delete
+
+### Merged
+
+- update tag 0.7.5 [`#77`](https://github.com/tencentyun/cos-go-sdk-v5/pull/77)
+- feat: Object.Delete passes optHeader through [`#75`](https://github.com/tencentyun/cos-go-sdk-v5/pull/75)
+
+### Commits
+
+- feat: make the header an optional parameter, backward compatible [`2eee514`](https://github.com/tencentyun/cos-go-sdk-v5/commit/2eee5149d727a045e80d28729bb60c90614e8275)
+
+## [v0.7.4](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.3...v0.7.4) - 2020-04-12
+
+### Merged
+
+- add encryption and referer [`#74`](https://github.com/tencentyun/cos-go-sdk-v5/pull/74)
+
+### Commits
+
+- version 0.7.4 [`da7722b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/da7722b10af59762b8e4f687891850650f0100a9)
+
+## v0.7.3 - 2020-04-02
+
+### Merged
+
+- update ci and error [`#73`](https://github.com/tencentyun/cos-go-sdk-v5/pull/73)
+- add policy and IsNoSuchKeyError function [`#72`](https://github.com/tencentyun/cos-go-sdk-v5/pull/72)
+- add restore object for batch [`#69`](https://github.com/tencentyun/cos-go-sdk-v5/pull/69)
+- Addbatch [`#68`](https://github.com/tencentyun/cos-go-sdk-v5/pull/68)
+- inventory/logging/replication/versioning/tagging [`#66`](https://github.com/tencentyun/cos-go-sdk-v5/pull/66)
+- add content-language [`#65`](https://github.com/tencentyun/cos-go-sdk-v5/pull/65)
+- add host [`#64`](https://github.com/tencentyun/cos-go-sdk-v5/pull/64)
+- update MultiUpload [`#63`](https://github.com/tencentyun/cos-go-sdk-v5/pull/63)
+- add bucket website and domain interface [`#53`](https://github.com/tencentyun/cos-go-sdk-v5/pull/53)
+- improvement: add support for the Upload Part - Copy API [`#54`](https://github.com/tencentyun/cos-go-sdk-v5/pull/54)
+- add sts policy describe [`#60`](https://github.com/tencentyun/cos-go-sdk-v5/pull/60)
+- update the sts example [`#59`](https://github.com/tencentyun/cos-go-sdk-v5/pull/59)
+- Automatic partitioning for MultiUpload [`#58`](https://github.com/tencentyun/cos-go-sdk-v5/pull/58)
+- add sse-c and optional header for put object [`#57`](https://github.com/tencentyun/cos-go-sdk-v5/pull/57)
+- fix object upload copy versionid [`#56`](https://github.com/tencentyun/cos-go-sdk-v5/pull/56)
+- Fix the multiupload property [`#50`](https://github.com/tencentyun/cos-go-sdk-v5/pull/50)
+- Fix the mulupload out of index panic [`#49`](https://github.com/tencentyun/cos-go-sdk-v5/pull/49)
+- fix the example of get, reorder the import package [`#48`](https://github.com/tencentyun/cos-go-sdk-v5/pull/48)
+- spec the test bucket only used for test [`#47`](https://github.com/tencentyun/cos-go-sdk-v5/pull/47)
+- reopen the coverage report [`#46`](https://github.com/tencentyun/cos-go-sdk-v5/pull/46)
+- Fix the ci test [`#45`](https://github.com/tencentyun/cos-go-sdk-v5/pull/45)
+- add Server Side Encryption Option to ObjectPutHeaderOptions and fixed testing [`#44`](https://github.com/tencentyun/cos-go-sdk-v5/pull/44)
+- Fix the outindex of the resp addr in head object [`#42`](https://github.com/tencentyun/cos-go-sdk-v5/pull/42)
+- Add the check for anonymous user [`#41`](https://github.com/tencentyun/cos-go-sdk-v5/pull/41)
+- Change the param type in listParts [`#38`](https://github.com/tencentyun/cos-go-sdk-v5/pull/38)
+- Fix the options param to listparts interface [`#37`](https://github.com/tencentyun/cos-go-sdk-v5/pull/37)
+- Add the error when return 200OK but body contains the error [`#34`](https://github.com/tencentyun/cos-go-sdk-v5/pull/34)
+- cgi already compact, so remove the sprit [`#32`](https://github.com/tencentyun/cos-go-sdk-v5/pull/32)
+- fix the blink in prefix which lead the wrong auth [`#31`](https://github.com/tencentyun/cos-go-sdk-v5/pull/31)
+- Support Versioning, Replication, Inventory and Logging API [`#30`](https://github.com/tencentyun/cos-go-sdk-v5/pull/30)
+- fix the panic of getting out of index with response header
[`#28`](https://github.com/tencentyun/cos-go-sdk-v5/pull/28) +- According the versionid to head and get object, add the opt header of copy object [`#26`](https://github.com/tencentyun/cos-go-sdk-v5/pull/26) +- rm the panic in interface [`#24`](https://github.com/tencentyun/cos-go-sdk-v5/pull/24) +- Add presigned url and demo use it to get object [`#23`](https://github.com/tencentyun/cos-go-sdk-v5/pull/23) +- Check delete object param and fix restore name [`#22`](https://github.com/tencentyun/cos-go-sdk-v5/pull/22) +- Add PostObjectRestore interface [`#21`](https://github.com/tencentyun/cos-go-sdk-v5/pull/21) +- Add vendor files [`#19`](https://github.com/tencentyun/cos-go-sdk-v5/pull/19) +- Add the comment to declare the out of memory issue when put large loc… [`#17`](https://github.com/tencentyun/cos-go-sdk-v5/pull/17) +- Need to improve the interface in the future [`#16`](https://github.com/tencentyun/cos-go-sdk-v5/pull/16) +- Add ci test with the testify and travis, Add get/put by localfile. [`#15`](https://github.com/tencentyun/cos-go-sdk-v5/pull/15) +- url encode without / [`#14`](https://github.com/tencentyun/cos-go-sdk-v5/pull/14) +- solve the oom when send the big file with the debug request header option [`#10`](https://github.com/tencentyun/cos-go-sdk-v5/pull/10) +- remove the content-type when the request body is empty [`#9`](https://github.com/tencentyun/cos-go-sdk-v5/pull/9) +- fix format [`#6`](https://github.com/tencentyun/cos-go-sdk-v5/pull/6) +- fix format of doc [`#5`](https://github.com/tencentyun/cos-go-sdk-v5/pull/5) +- update the comment for godoc [`#4`](https://github.com/tencentyun/cos-go-sdk-v5/pull/4) +- fix test [`#3`](https://github.com/tencentyun/cos-go-sdk-v5/pull/3) +- update rm the append interface [`#2`](https://github.com/tencentyun/cos-go-sdk-v5/pull/2) + +### Commits + +- fix the vendor files [`a5b3ad3`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a5b3ad37d64c5aa82b12a57a6fdb0d2dba5ca1a8) +- first to commit project [`6ad265f`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6ad265fc0c275a4b1615c0f4702a7a992d8d299c) +- add batch [`c57ac81`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c57ac81a16da82605cc42bf9e0c8fd3af2cf62c5) +- Support Versioning, Replication, Inventory and Logging api [`c131870`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c131870916d7c29eb8c4a9512ea59ed7d7b09e18) +- fix path [`121a7d8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/121a7d861c8e0fb048aac5a8bc1a644414b16dc3) +- fix the vendor file [`830b5c5`](https://github.com/tencentyun/cos-go-sdk-v5/commit/830b5c54a51d38b91e1432c7b216350a5c479ad0) +- update bucket inventory/logging/replication/versioning/tagging and test [`d37fd23`](https://github.com/tencentyun/cos-go-sdk-v5/commit/d37fd23cdfe570886024c2c0586a2f14429a10ed) +- Add ci test with the testify and travis, test the working [`4cc9e08`](https://github.com/tencentyun/cos-go-sdk-v5/commit/4cc9e08da1a0ce670cf6d20567059b7529b440d5) +- Add ci test with the testify and travis, test the working [`c323f1b`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c323f1b90cd97b98708216a72c666d89bdbcad8b) +- rm inventory and logging example [`7739e84`](https://github.com/tencentyun/cos-go-sdk-v5/commit/7739e84d42b7a33121b16817f25129050db93db5) +- add policy [`b427226`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b427226ae44b1c0f25b8681147e828f4603a9014) +- update batch [`156deae`](https://github.com/tencentyun/cos-go-sdk-v5/commit/156deaea3caacc5249050ae1e7378ae8af909c5d) +- update bucket domain and 
website struct [`b97a490`](https://github.com/tencentyun/cos-go-sdk-v5/commit/b97a490e28370fecc475bf8b1a82df0d5a52a424) +- presigned url, and demo [`0c2d28c`](https://github.com/tencentyun/cos-go-sdk-v5/commit/0c2d28caf108cff2a95507a9ff286be679d7925b) +- fix ci_test [`c17efed`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c17efed0aedee60e819d3e9504720ff47958da14) +- fix [`9ebd645`](https://github.com/tencentyun/cos-go-sdk-v5/commit/9ebd6456b6aa788d0d2e97f4a8fb652c1b9a1976) +- update batch [`699fef2`](https://github.com/tencentyun/cos-go-sdk-v5/commit/699fef215987bcf7f2459b9843ce8641900135f7) +- Add the comment to declare the out of memory issue when put large local file [`6c894b8`](https://github.com/tencentyun/cos-go-sdk-v5/commit/6c894b8e4d5f0bb1ca24d444a9f8e18f95027cde) +- go module support [`54101f9`](https://github.com/tencentyun/cos-go-sdk-v5/commit/54101f9739b3e59f201092f79d683a0cf377d7d0) +- optional header for complete upload part [`44546a4`](https://github.com/tencentyun/cos-go-sdk-v5/commit/44546a40372260158652ddc6d805ddfc19f13fa3) diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/LICENSE b/vendor/github.com/tencentyun/cos-go-sdk-v5/LICENSE new file mode 100644 index 0000000..8ff7942 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 mozillazg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/Makefile b/vendor/github.com/tencentyun/cos-go-sdk-v5/Makefile new file mode 100644 index 0000000..108cfa3 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/Makefile @@ -0,0 +1,22 @@ +help: + @echo "test run test" + @echo "lint run lint" + @echo "example run examples" + +.PHONY: test +test: + go test -v -cover -coverprofile cover.out + go tool cover -html cover.out -o cover.html + +.PHONY: lint +lint: + gofmt -s -w . + goimports -w . + golint . 
+	go vet
+
+.PHONY: example
+example:
+	cd example && sh test.sh
+ci-test:
+	cd costesting && go test -v
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/README.md b/vendor/github.com/tencentyun/cos-go-sdk-v5/README.md
new file mode 100644
index 0000000..72b2b88
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/README.md
@@ -0,0 +1,106 @@
+# cos-go-sdk-v5
+
+Go SDK for Tencent Cloud Object Storage (COS), using the V5 (XML) API.
+
+## Install
+
+`go get -u github.com/tencentyun/cos-go-sdk-v5`
+
+
+## Usage
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/tencentyun/cos-go-sdk-v5"
+)
+
+func main() {
+	// Replace with your real information.
+	// Bucket names follow the {name}-{appid} convention; the name filled in here must use that format.
+	u, _ := url.Parse("https://.cos..myqcloud.com")
+	b := &cos.BaseURL{BucketURL: u}
+	c := cos.NewClient(b, &http.Client{
+		// Request timeout.
+		Timeout: 100 * time.Second,
+		Transport: &cos.AuthorizationTransport{
+			// Fill in your actual credentials; they can also be set as environment variables.
+			SecretID:  os.Getenv("COS_SECRETID"),
+			SecretKey: os.Getenv("COS_SECRETKEY"),
+		},
+	})
+
+	name := "test/hello.txt"
+	resp, err := c.Object.Get(context.Background(), name, nil)
+	if err != nil {
+		panic(err)
+	}
+	bs, _ := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	fmt.Printf("%s\n", string(bs))
+}
+```
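+
+The same client uploads as well. A minimal upload sketch mirroring the setup above (the bucket URL below is a placeholder; `Object.Put` accepts any `io.Reader`):
+
+```go
+package main
+
+import (
+	"context"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/tencentyun/cos-go-sdk-v5"
+)
+
+func main() {
+	// Placeholder bucket URL; substitute your own {name}-{appid} bucket and region.
+	u, _ := url.Parse("https://examplebucket-1250000000.cos.ap-guangzhou.myqcloud.com")
+	c := cos.NewClient(&cos.BaseURL{BucketURL: u}, &http.Client{
+		Timeout: 100 * time.Second,
+		Transport: &cos.AuthorizationTransport{
+			SecretID:  os.Getenv("COS_SECRETID"),
+			SecretKey: os.Getenv("COS_SECRETKEY"),
+		},
+	})
+
+	// Upload a small object from memory; nil options use the defaults.
+	if _, err := c.Object.Put(context.Background(), "test/hello.txt", strings.NewReader("hello cos"), nil); err != nil {
+		panic(err)
+	}
+}
+```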
+
+Every API has a corresponding usage example under the [example](./example/) directory.
+
+Service API:
+
+* [x] Get Service (example: [service/get.go](./example/service/get.go))
+
+Bucket API:
+
+* [x] Get Bucket (example: [bucket/get.go](./example/bucket/get.go))
+* [x] Get Bucket ACL (example: [bucket/getACL.go](./example/bucket/getACL.go))
+* [x] Get Bucket CORS (example: [bucket/getCORS.go](./example/bucket/getCORS.go))
+* [x] Get Bucket Location (example: [bucket/getLocation.go](./example/bucket/getLocation.go))
+* [x] Get Bucket Lifecycle (example: [bucket/getLifecycle.go](./example/bucket/getLifecycle.go))
+* [x] Get Bucket Tagging (example: [bucket/getTagging.go](./example/bucket/getTagging.go))
+* [x] Put Bucket (example: [bucket/put.go](./example/bucket/put.go))
+* [x] Put Bucket ACL (example: [bucket/putACL.go](./example/bucket/putACL.go))
+* [x] Put Bucket CORS (example: [bucket/putCORS.go](./example/bucket/putCORS.go))
+* [x] Put Bucket Lifecycle (example: [bucket/putLifecycle.go](./example/bucket/putLifecycle.go))
+* [x] Put Bucket Tagging (example: [bucket/putTagging.go](./example/bucket/putTagging.go))
+* [x] Delete Bucket (example: [bucket/delete.go](./example/bucket/delete.go))
+* [x] Delete Bucket CORS (example: [bucket/deleteCORS.go](./example/bucket/deleteCORS.go))
+* [x] Delete Bucket Lifecycle (example: [bucket/deleteLifecycle.go](./example/bucket/deleteLifecycle.go))
+* [x] Delete Bucket Tagging (example: [bucket/deleteTagging.go](./example/bucket/deleteTagging.go))
+* [x] Head Bucket (example: [bucket/head.go](./example/bucket/head.go))
+* [x] List Multipart Uploads (example: [bucket/listMultipartUploads.go](./example/bucket/listMultipartUploads.go))
+
+Object API:
+
+* [x] Get Object (example: [object/get.go](./example/object/get.go))
+* [x] Get Object ACL (example: [object/getACL.go](./example/object/getACL.go))
+* [x] Put Object (example: [object/put.go](./example/object/put.go))
+* [x] Put Object ACL (example: [object/putACL.go](./example/object/putACL.go))
+* [x] Put Object Copy (example: [object/copy.go](./example/object/copy.go))
+* [x] Delete Object (example: [object/delete.go](./example/object/delete.go))
+* [x] Delete Multiple Object (example: [object/deleteMultiple.go](./example/object/deleteMultiple.go))
+* [x] Head Object (example: [object/head.go](./example/object/head.go))
+* [x] Options Object (example: [object/options.go](./example/object/options.go))
+* [x] Initiate Multipart Upload (example: [object/initiateMultipartUpload.go](./example/object/initiateMultipartUpload.go))
+* [x] Upload Part (example: [object/uploadPart.go](./example/object/uploadPart.go))
+* [x] List Parts (example: [object/listParts.go](./example/object/listParts.go))
+* [x] Complete Multipart Upload (example: [object/completeMultipartUpload.go](./example/object/completeMultipartUpload.go))
+* [x] Abort Multipart Upload (example: [object/abortMultipartUpload.go](./example/object/abortMultipartUpload.go))
+* [x] Multipart Upload (example: [object/MutiUpload.go](./example/object/MutiUpload.go))
+
+
+Data processing API:
+
+* [x] Media processing (example: [media_process.go](./example/CI/media_process/media_process.go))
+* [x] Document processing (example: [ci_doc_process.go](./example/CI/doc_process/ci_doc_process.go))
+
+
+Content auditing API:
+
+* [x] Video auditing (example: [ci_video_auditing_job.go](./example/CI/content_auditing/ci_video_auditing_job.go))
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/auth.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/auth.go
new file mode 100644
index 0000000..683e793
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/auth.go
@@ -0,0 +1,550 @@
+package cos
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io/ioutil"
+	math_rand "math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	sha1SignAlgorithm   = "sha1"
+	privateHeaderPrefix = "x-cos-"
+	defaultAuthExpire   = time.Hour
+)
+
+var (
+	defaultCVMAuthExpire = int64(600)
+	defaultCVMSchema     = "http"
+	defaultCVMMetaHost   = "metadata.tencentyun.com"
+	defaultCVMCredURI    = "latest/meta-data/cam/security-credentials"
+	internalHost         = regexp.MustCompile(`^.*cos-internal\.[a-z-1]+\.tencentcos\.cn$`)
+)
+
+var DNSScatterDialContext = DNSScatterDialContextFunc
+
+var DNSScatterTransport = &http.Transport{
+	Proxy:                 http.ProxyFromEnvironment,
+	DialContext:           DNSScatterDialContext,
+	MaxIdleConns:          100,
+	IdleConnTimeout:       90 * time.Second,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 1 * time.Second,
+}
+
+func DNSScatterDialContextFunc(ctx context.Context, network string, addr string) (conn net.Conn, err error) {
+	host, port, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+	ips, err := net.DefaultResolver.LookupIPAddr(ctx, host)
+	if err != nil {
+		return nil, err
+	}
+	dialer := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+	// scatter connections across the resolved IPs (DNS scatter)
+	math_rand.Seed(time.Now().UnixNano())
+	start := math_rand.Intn(len(ips))
+	for i := start; i < len(ips); i++ {
+		conn, err = dialer.DialContext(ctx, network, net.JoinHostPort(ips[i].IP.String(), port))
+		if err == nil {
+			return
+		}
+	}
+	for i := 0; i < start; i++ {
+		conn, err = dialer.DialContext(ctx, network, net.JoinHostPort(ips[i].IP.String(), port))
+		if err == nil {
+			break
+		}
+	}
+	return
+}
+
+// headers that take part in request signing
+var NeedSignHeaders = map[string]bool{
+	"host":                           true,
+	"range":                          true,
+	"x-cos-acl":                      true,
+	"x-cos-grant-read":               true,
+	"x-cos-grant-write":              true,
+	"x-cos-grant-full-control":       true,
+	"response-content-type":          true,
+	"response-content-language":      true,
+	"response-expires":               true,
+	"response-cache-control":         true,
+	"response-content-disposition":   true,
+	"response-content-encoding":      true,
+	"cache-control":                  true,
+	"content-disposition":            true,
+	"content-encoding":               true,
+	"content-type":                   true,
+	"content-length":                 true,
+	"content-md5":                    true,
+	"transfer-encoding":              true,
+	"versionid":                      true,
+	"expect":                         true,
+	"expires":                        true,
+	"x-cos-content-sha1":             true,
+	"x-cos-storage-class":            true,
+	"if-match":                       true,
+	"if-modified-since":              true,
+	"if-none-match":                  true,
+	"if-unmodified-since":            true,
+	"origin":                         true,
+	"access-control-request-method":  true,
+	"access-control-request-headers": true,
+	"x-cos-object-type":              true,
+}
+
+// Not thread-safe; call it only during process initialization (not during Client initialization)
+func SetNeedSignHeaders(key string, val bool) {
+	NeedSignHeaders[key] = val
+}
+
+func safeURLEncode(s string) string {
+	s = encodeURIComponent(s)
+	s = strings.Replace(s, "!", "%21", -1)
+	s = strings.Replace(s, "'", "%27", -1)
+	s = strings.Replace(s, "(", "%28", -1)
+	s = strings.Replace(s, ")", "%29", -1)
+	s = strings.Replace(s, "*", "%2A", -1)
+	return s
+}
+
+type valuesSignMap map[string][]string
+
+func (vs valuesSignMap) Add(key, value string) {
+	key = strings.ToLower(safeURLEncode(key))
+	vs[key] = append(vs[key], value)
+}
+
+func (vs valuesSignMap) Encode() string {
+	var keys []string
+	for k := range vs {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	var pairs []string
+	for _, k := range keys {
+		items := vs[k]
+		sort.Strings(items)
+		for _, val := range items {
+			pairs = append(
+				pairs,
+				fmt.Sprintf("%s=%s", k, safeURLEncode(val)))
+		}
+	}
+	return strings.Join(pairs, "&")
+}
+
+// AuthTime carries the q-sign-time and q-key-time parameters needed to build a signature
+type AuthTime struct {
+	SignStartTime time.Time
+	SignEndTime   time.Time
+	KeyStartTime  time.Time
+	KeyEndTime    time.Time
+}
+
+// NewAuthTime is a convenience constructor for AuthTime
+//
+// expire: how long from now the signature stays valid.
+func NewAuthTime(expire time.Duration) *AuthTime {
+	signStartTime := time.Now()
+	keyStartTime := signStartTime
+	signEndTime := signStartTime.Add(expire)
+	keyEndTime := signEndTime
+	return &AuthTime{
+		SignStartTime: signStartTime,
+		SignEndTime:   signEndTime,
+		KeyStartTime:  keyStartTime,
+		KeyEndTime:    keyEndTime,
+	}
+}
+
+// signString returns the q-sign-time string
+func (a *AuthTime) signString() string {
+	return fmt.Sprintf("%d;%d", a.SignStartTime.Unix(), a.SignEndTime.Unix())
+}
+
+// keyString returns the q-key-time string
+func (a *AuthTime) keyString() string {
+	return fmt.Sprintf("%d;%d", a.KeyStartTime.Unix(), a.KeyEndTime.Unix())
+}
+
+// newAuthorization builds the final Authorization string step by step
+func newAuthorization(secretID, secretKey string, req *http.Request, authTime *AuthTime, signHost bool) string {
+	signTime := authTime.signString()
+	keyTime := authTime.keyString()
+	signKey := calSignKey(secretKey, keyTime)
+
+	if signHost {
+		req.Header.Set("Host", req.Host)
+	}
+	formatHeaders, signedHeaderList := genFormatHeaders(req.Header)
+	formatParameters, signedParameterList := genFormatParameters(req.URL.Query())
+	formatString := genFormatString(req.Method, *req.URL, formatParameters, formatHeaders)
+
+	stringToSign := calStringToSign(sha1SignAlgorithm, keyTime, formatString)
+	signature := calSignature(signKey, stringToSign)
+
+	return genAuthorization(
+		secretID, signTime, keyTime, signature, signedHeaderList,
+		signedParameterList,
+	)
+}
+
+// AddAuthorizationHeader adds signature information to req
+func AddAuthorizationHeader(secretID, secretKey string, sessionToken string, req *http.Request, authTime *AuthTime) {
+	if secretID == "" {
+		return
+	}
+
+	auth := newAuthorization(secretID, secretKey, req,
+		authTime, true,
+	)
+	if len(sessionToken) > 0 {
+		req.Header.Set("x-cos-security-token", sessionToken)
+	}
+	req.Header.Set("Authorization", auth)
+}
+
+// calSignKey computes the SignKey
+func calSignKey(secretKey, keyTime string) string {
+	digest := calHMACDigest(secretKey, keyTime, sha1SignAlgorithm)
+	return fmt.Sprintf("%x", digest)
+}
+
+// calStringToSign computes the StringToSign
+func calStringToSign(signAlgorithm, signTime, formatString string) string {
+	h := sha1.New()
+	h.Write([]byte(formatString))
+	return fmt.Sprintf("%s\n%s\n%x\n", signAlgorithm, signTime, h.Sum(nil))
+}
+
+// calSignature computes the Signature
+func calSignature(signKey, stringToSign string) string {
+	digest := calHMACDigest(signKey, stringToSign, sha1SignAlgorithm)
+	return fmt.Sprintf("%x", digest)
+}
+
+// genAuthorization assembles the Authorization string
+func genAuthorization(secretID, signTime, keyTime, signature string, signedHeaderList, signedParameterList []string) string {
+	return strings.Join([]string{
+		"q-sign-algorithm=" + sha1SignAlgorithm,
+		"q-ak=" + secretID,
+		"q-sign-time=" + signTime,
+		"q-key-time=" + keyTime,
+		"q-header-list=" + strings.Join(signedHeaderList, ";"),
+		"q-url-param-list=" + strings.Join(signedParameterList, ";"),
+		"q-signature=" + signature,
+	}, "&")
+}
+
+// genFormatString builds the FormatString
+func genFormatString(method string, uri url.URL, formatParameters, formatHeaders string) string {
+	formatMethod := strings.ToLower(method)
+	formatURI := uri.Path
+
+	return fmt.Sprintf("%s\n%s\n%s\n%s\n", formatMethod, formatURI,
+		formatParameters, formatHeaders,
+	)
+}
+
+// genFormatParameters builds the FormatParameters and SignedParameterList
+// (valuesSignMap is used instead of url.Values{} to get COS-style encoding)
+func genFormatParameters(parameters url.Values) (formatParameters string, signedParameterList []string) {
+	ps := valuesSignMap{}
+	for key, values := range parameters {
+		for _, value := range values {
+			ps.Add(key, value)
+			signedParameterList = append(signedParameterList, strings.ToLower(safeURLEncode(key)))
+		}
+	}
+	//formatParameters = strings.ToLower(ps.Encode())
+	formatParameters = ps.Encode()
+	sort.Strings(signedParameterList)
+	return
+}
+
+// genFormatHeaders builds the FormatHeaders and SignedHeaderList
+func genFormatHeaders(headers http.Header) (formatHeaders string, signedHeaderList []string) {
+	hs := valuesSignMap{}
+	for key, values := range headers {
+		if isSignHeader(strings.ToLower(key)) {
+			for _, value := range values {
+				hs.Add(key, value)
+				signedHeaderList = append(signedHeaderList, strings.ToLower(safeURLEncode(key)))
+			}
+		}
+	}
+	formatHeaders = hs.Encode()
+	sort.Strings(signedHeaderList)
+	return
+}
+
+// calHMACDigest computes an HMAC digest
+func calHMACDigest(key, msg, signMethod string) []byte {
+	var hashFunc func() hash.Hash
+	switch signMethod {
+	case "sha1":
+		hashFunc = sha1.New
+	default:
+		hashFunc = sha1.New
+	}
+	h := hmac.New(hashFunc, []byte(key))
+	h.Write([]byte(msg))
+	return h.Sum(nil)
+}
+
+func isSignHeader(key string) bool {
+	for k, v := range NeedSignHeaders {
+		if key == k && v {
+			return true
+		}
+	}
+	return strings.HasPrefix(key, privateHeaderPrefix)
+}
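+
+// Taken together, the helpers above implement the COS v5 signing flow:
+// SignKey = HMAC-SHA1(SecretKey, KeyTime); StringToSign wraps sha1(FormatString);
+// Signature = HMAC-SHA1(SignKey, StringToSign). A minimal sketch of signing a
+// request with them (illustrative only; the URL and credentials below are
+// placeholders, not part of the SDK):
+//
+//	req, _ := http.NewRequest(http.MethodGet, "https://examplebucket-1250000000.cos.ap-guangzhou.myqcloud.com/test/hello.txt", nil)
+//	authTime := NewAuthTime(10 * time.Minute)
+//	AddAuthorizationHeader("SECRETID", "SECRETKEY", "", req, authTime)
+//	// req.Header now carries the q-sign-algorithm/q-ak/q-signature Authorization value.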
+
+// AuthorizationTransport adds the Authorization header to each request
+type AuthorizationTransport struct {
+	SecretID     string
+	SecretKey    string
+	SessionToken string
+	rwLocker     sync.RWMutex
+	// how long a signature stays valid
+	Expire    time.Duration
+	Transport http.RoundTripper
+}
+
+// SetCredential updates the SecretID (ak), SecretKey (sk) and session token
+func (t *AuthorizationTransport) SetCredential(ak, sk, token string) {
+	t.rwLocker.Lock()
+	defer t.rwLocker.Unlock()
+	t.SecretID = ak
+	t.SecretKey = sk
+	t.SessionToken = token
+}
+
+// GetCredential returns the ak, sk and token
+func (t *AuthorizationTransport) GetCredential() (string, string, string) {
+	t.rwLocker.RLock()
+	defer t.rwLocker.RUnlock()
+	return t.SecretID, t.SecretKey, t.SessionToken
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *AuthorizationTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = cloneRequest(req) // per RoundTrip contract
+
+	ak, sk, token := t.GetCredential()
+	if strings.HasPrefix(ak, " ") || strings.HasSuffix(ak, " ") {
+		return nil, fmt.Errorf("SecretID is invalid")
+	}
+	if strings.HasPrefix(sk, " ") || strings.HasSuffix(sk, " ") {
+		return nil, fmt.Errorf("SecretKey is invalid")
+	}
+
+	// add the Authorization header
+	authTime := NewAuthTime(defaultAuthExpire)
+	AddAuthorizationHeader(ak, sk, token, req, authTime)
+
+	resp, err := t.transport(req).RoundTrip(req)
+	return resp, err
+}
+
+func (t *AuthorizationTransport) transport(req *http.Request) http.RoundTripper {
+	if t.Transport != nil {
+		return t.Transport
+	}
+	// internal domains use DNS scatter by default
+	if rc := internalHost.MatchString(req.URL.Hostname()); rc {
+		return DNSScatterTransport
+	}
+	return http.DefaultTransport
+}
+
+type CVMSecurityCredentials struct {
+	TmpSecretId  string `json:",omitempty"`
+	TmpSecretKey string `json:",omitempty"`
+	ExpiredTime  int64  `json:",omitempty"`
+	Expiration   string `json:",omitempty"`
+	Token        string `json:",omitempty"`
+	Code         string `json:",omitempty"`
+}
+
+type CVMCredentialTransport struct {
+	RoleName     string
+	Transport    http.RoundTripper
+	secretID     string
+	secretKey    string
+	sessionToken string
+	expiredTime  int64
+	rwLocker     sync.RWMutex
+}
+
+func (t *CVMCredentialTransport) GetRoles() ([]string, error) {
+	urlname := fmt.Sprintf("%s://%s/%s", defaultCVMSchema, defaultCVMMetaHost, defaultCVMCredURI)
+	resp, err := http.Get(urlname)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		bs, _ := ioutil.ReadAll(resp.Body)
+		return nil, fmt.Errorf("get cvm security-credentials role failed, StatusCode: %v, Body: %v", resp.StatusCode, string(bs))
+	}
+	bs, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	roles := strings.Split(strings.TrimSpace(string(bs)), "\n")
+	if len(roles) == 0 {
+		return nil, fmt.Errorf("get cvm security-credentials role failed, No valid cam role was found")
+	}
+	return roles, nil
+}
+
+// https://cloud.tencent.com/document/product/213/4934
+func (t *CVMCredentialTransport) UpdateCredential(now int64) (string, string, string, error) {
+	t.rwLocker.Lock()
+	defer t.rwLocker.Unlock()
+	if t.expiredTime > now+defaultCVMAuthExpire {
+		return t.secretID, t.secretKey, t.sessionToken, nil
+	}
+	roleName := t.RoleName
+	if roleName == "" {
+		roles, err := t.GetRoles()
+		if err != nil {
+			return t.secretID, t.secretKey, t.sessionToken, err
+		}
+		roleName = roles[0]
+	}
+	urlname := fmt.Sprintf("%s://%s/%s/%s", defaultCVMSchema, defaultCVMMetaHost, defaultCVMCredURI, roleName)
+	resp, err := http.Get(urlname)
+	if err != nil {
+		return t.secretID, t.secretKey, t.sessionToken, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		bs, _ := ioutil.ReadAll(resp.Body)
+		return t.secretID, t.secretKey, t.sessionToken, fmt.Errorf("call cvm security-credentials failed, StatusCode: %v, Body: %v", resp.StatusCode, string(bs))
+	}
+	var cred CVMSecurityCredentials
+	err = json.NewDecoder(resp.Body).Decode(&cred)
+	if err != nil {
+		return t.secretID, t.secretKey, t.sessionToken, err
+	}
+	if cred.Code != "Success" {
+		return t.secretID, t.secretKey, t.sessionToken, fmt.Errorf("call cvm security-credentials failed, Code:%v", cred.Code)
+	}
+	t.secretID, t.secretKey, t.sessionToken, t.expiredTime = cred.TmpSecretId, cred.TmpSecretKey, cred.Token, cred.ExpiredTime
+	return t.secretID, t.secretKey, t.sessionToken, nil
+}
+
+func (t *CVMCredentialTransport) GetCredential() (string, string, string, error) {
+	now := time.Now().Unix()
+	t.rwLocker.RLock()
+	// refresh the temporary key defaultCVMAuthExpire seconds before it expires
+	if t.expiredTime <= now+defaultCVMAuthExpire {
+		expiredTime := t.expiredTime
+		t.rwLocker.RUnlock()
+		secretID, secretKey, secretToken, err := t.UpdateCredential(now)
+		// fetching a new temporary key failed, but the old one has not expired yet
+		if err != nil && now < expiredTime {
+			err = nil
+		}
+		return secretID, secretKey, secretToken, err
+	}
+	defer t.rwLocker.RUnlock()
+	return t.secretID, t.secretKey, t.sessionToken, nil
+}
+
+func (t *CVMCredentialTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	ak, sk, token, err := t.GetCredential()
+	if err != nil {
+		return nil, err
+	}
+	req = cloneRequest(req)
+	// add the Authorization header
+	authTime := NewAuthTime(defaultAuthExpire)
+	AddAuthorizationHeader(ak, sk, token, req, authTime)
+
+	resp, err := t.transport().RoundTrip(req)
+	return resp, err
+}
+
+func (t *CVMCredentialTransport) transport() http.RoundTripper {
+	if t.Transport != nil {
+		return t.Transport
+	}
+	return http.DefaultTransport
+}
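+
+// A minimal sketch of wiring CVMCredentialTransport into a client (the bucket
+// URL is a placeholder). With RoleName left empty, the first role bound to the
+// CVM instance is used, and temporary keys are refreshed defaultCVMAuthExpire
+// seconds before they expire:
+//
+//	u, _ := url.Parse("https://examplebucket-1250000000.cos.ap-guangzhou.myqcloud.com")
+//	client := NewClient(&BaseURL{BucketURL: u}, &http.Client{
+//		Transport: &CVMCredentialTransport{}, // RoleName is optional
+//	})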
+
+type CredentialTransport struct {
+	Transport  http.RoundTripper
+	Credential CredentialIface
+}
+
+func (t *CredentialTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	ak, sk, token := t.Credential.GetSecretId(), t.Credential.GetSecretKey(), t.Credential.GetToken()
+
+	req = cloneRequest(req)
+	// add the Authorization header
+	authTime := NewAuthTime(defaultAuthExpire)
+	AddAuthorizationHeader(ak, sk, token, req, authTime)
+
+	resp, err := t.transport().RoundTrip(req)
+	return resp, err
+}
+
+func (t *CredentialTransport) transport() http.RoundTripper {
+	if t.Transport != nil {
+		return t.Transport
+	}
+	return http.DefaultTransport
+}
+
+type CredentialIface interface {
+	GetSecretId() string
+	GetToken() string
+	GetSecretKey() string
+}
+
+func NewTokenCredential(secretId, secretKey, token string) *Credential {
+	return &Credential{
+		SecretID:     secretId,
+		SecretKey:    secretKey,
+		SessionToken: token,
+	}
+}
+
+func (c *Credential) GetSecretKey() string {
+	return c.SecretKey
+}
+
+func (c *Credential) GetSecretId() string {
+	return c.SecretID
+}
+
+func (c *Credential) GetToken() string {
+	return c.SessionToken
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/batch.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/batch.go
new file mode 100644
index 0000000..3af9817
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/batch.go
@@ -0,0 +1,262 @@
+package cos
+
+import (
+	"context"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+)
+
+type BatchService service
+
+type BatchRequestHeaders struct {
+	XCosAppid     int          `header:"x-cos-appid" xml:"-" url:"-"`
+	ContentLength string       `header:"Content-Length,omitempty" xml:"-" url:"-"`
+	ContentType   string       `header:"Content-Type,omitempty" xml:"-" url:"-"`
+	Headers       *http.Header `header:"-" xml:"-" url:"-"`
+}
+
+// BatchProgressSummary
+type BatchProgressSummary struct {
+	NumberOfTasksFailed    int `xml:"NumberOfTasksFailed" header:"-" url:"-"`
+	NumberOfTasksSucceeded int `xml:"NumberOfTasksSucceeded" header:"-" url:"-"`
+	TotalNumberOfTasks     int `xml:"TotalNumberOfTasks" header:"-" url:"-"`
+}
+
+// BatchJobReport
+type BatchJobReport struct {
+	Bucket  string `xml:"Bucket" header:"-" url:"-"`
+	Enabled string `xml:"Enabled" header:"-" url:"-"`
+
Format string `xml:"Format" header:"-" url:"-"` + Prefix string `xml:"Prefix,omitempty" header:"-" url:"-"` + ReportScope string `xml:"ReportScope" header:"-" url:"-"` +} + +// BatchJobOperationCopy +type BatchMetadata struct { + Key string `xml:"Key" header:"-" url:"-"` + Value string `xml:"Value" header:"-" url:"-"` +} +type BatchNewObjectMetadata struct { + CacheControl string `xml:"CacheControl,omitempty" header:"-" url:"-"` + ContentDisposition string `xml:"ContentDisposition,omitempty" header:"-" url:"-"` + ContentEncoding string `xml:"ContentEncoding,omitempty" header:"-" url:"-"` + ContentType string `xml:"ContentType,omitempty" header:"-" url:"-"` + HttpExpiresDate string `xml:"HttpExpiresDate,omitempty" header:"-" url:"-"` + SSEAlgorithm string `xml:"SSEAlgorithm,omitempty" header:"-" url:"-"` + UserMetadata []BatchMetadata `xml:"UserMetadata>member,omitempty" header:"-" url:"-"` +} +type BatchGrantee struct { + DisplayName string `xml:"DisplayName,omitempty" header:"-" url:"-"` + Identifier string `xml:"Identifier" header:"-" url:"-"` + TypeIdentifier string `xml:"TypeIdentifier" header:"-" url:"-"` +} +type BatchCOSGrant struct { + Grantee *BatchGrantee `xml:"Grantee" header:"-" url:"-"` + Permission string `xml:"Permission" header:"-" url:"-"` +} +type BatchAccessControlGrants struct { + COSGrants *BatchCOSGrant `xml:"COSGrant,omitempty" header:"-" url:"-"` +} +type BatchJobOperationCopy struct { + AccessControlGrants *BatchAccessControlGrants `xml:"AccessControlGrants,omitempty" header:"-" url:"-"` + CannedAccessControlList string `xml:"CannedAccessControlList,omitempty" header:"-" url:"-"` + MetadataDirective string `xml:"MetadataDirective,omitempty" header:"-" url:"-"` + ModifiedSinceConstraint int64 `xml:"ModifiedSinceConstraint,omitempty" header:"-" url:"-"` + UnModifiedSinceConstraint int64 `xml:"UnModifiedSinceConstraint,omitempty" header:"-" url:"-"` + NewObjectMetadata *BatchNewObjectMetadata `xml:"NewObjectMetadata,omitempty" header:"-" url:"-"` + StorageClass string `xml:"StorageClass,omitempty" header:"-" url:"-"` + TargetResource string `xml:"TargetResource" header:"-" url:"-"` +} + +// BatchInitiateRestoreObject +type BatchInitiateRestoreObject struct { + ExpirationInDays int `xml:"ExpirationInDays"` + JobTier string `xml:"JobTier"` +} + +// BatchJobOperation +type BatchJobOperation struct { + PutObjectCopy *BatchJobOperationCopy `xml:"COSPutObjectCopy,omitempty" header:"-" url:"-"` + RestoreObject *BatchInitiateRestoreObject `xml:"COSInitiateRestoreObject,omitempty" header:"-" url:"-"` +} + +// BatchJobManifest +type BatchJobManifestLocation struct { + ETag string `xml:"ETag" header:"-" url:"-"` + ObjectArn string `xml:"ObjectArn" header:"-" url:"-"` + ObjectVersionId string `xml:"ObjectVersionId,omitempty" header:"-" url:"-"` +} +type BatchJobManifestSpec struct { + Fields []string `xml:"Fields>member,omitempty" header:"-" url:"-"` + Format string `xml:"Format" header:"-" url:"-"` +} +type BatchJobManifest struct { + Location *BatchJobManifestLocation `xml:"Location" header:"-" url:"-"` + Spec *BatchJobManifestSpec `xml:"Spec" header:"-" url:"-"` +} + +type BatchCreateJobOptions struct { + XMLName xml.Name `xml:"CreateJobRequest" header:"-" url:"-"` + ClientRequestToken string `xml:"ClientRequestToken" header:"-" url:"-"` + ConfirmationRequired string `xml:"ConfirmationRequired,omitempty" header:"-" url:"-"` + Description string `xml:"Description,omitempty" header:"-" url:"-"` + Manifest *BatchJobManifest `xml:"Manifest" header:"-" url:"-"` + Operation 
*BatchJobOperation `xml:"Operation" header:"-" url:"-"` + Priority int `xml:"Priority" header:"-" url:"-"` + Report *BatchJobReport `xml:"Report" header:"-" url:"-"` + RoleArn string `xml:"RoleArn" header:"-" url:"-"` +} + +type BatchCreateJobResult struct { + XMLName xml.Name `xml:"CreateJobResult"` + JobId string `xml:"JobId,omitempty"` +} + +func (s *BatchService) CreateJob(ctx context.Context, opt *BatchCreateJobOptions, headers *BatchRequestHeaders) (*BatchCreateJobResult, *Response, error) { + var res BatchCreateJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BatchURL, + uri: "/jobs", + method: http.MethodPost, + optHeader: headers, + body: opt, + result: &res, + } + + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +type BatchJobFailureReasons struct { + FailureCode string `xml:"FailureCode" header:"-" url:"-"` + FailureReason string `xml:"FailureReason" header:"-" url:"-"` +} + +type BatchDescribeJob struct { + ConfirmationRequired string `xml:"ConfirmationRequired,omitempty" header:"-" url:"-"` + CreationTime string `xml:"CreationTime,omitempty" header:"-" url:"-"` + Description string `xml:"Description,omitempty" header:"-" url:"-"` + FailureReasons *BatchJobFailureReasons `xml:"FailureReasons>JobFailure,omitempty" header:"-" url:"-"` + JobId string `xml:"JobId" header:"-" url:"-"` + Manifest *BatchJobManifest `xml:"Manifest" header:"-" url:"-"` + Operation *BatchJobOperation `xml:"Operation" header:"-" url:"-"` + Priority int `xml:"Priority" header:"-" url:"-"` + ProgressSummary *BatchProgressSummary `xml:"ProgressSummary" header:"-" url:"-"` + Report *BatchJobReport `xml:"Report,omitempty" header:"-" url:"-"` + RoleArn string `xml:"RoleArn,omitempty" header:"-" url:"-"` + Status string `xml:"Status,omitempty" header:"-" url:"-"` + StatusUpdateReason string `xml:"StatusUpdateReason,omitempty" header:"-" url:"-"` + SuspendedCause string `xml:"SuspendedCause,omitempty" header:"-" url:"-"` + SuspendedDate string `xml:"SuspendedDate,omitempty" header:"-" url:"-"` + TerminationDate string `xml:"TerminationDate,omitempty" header:"-" url:"-"` +} +type BatchDescribeJobResult struct { + XMLName xml.Name `xml:"DescribeJobResult"` + Job *BatchDescribeJob `xml:"Job,omitempty"` +} + +func (s *BatchService) DescribeJob(ctx context.Context, id string, headers *BatchRequestHeaders) (*BatchDescribeJobResult, *Response, error) { + var res BatchDescribeJobResult + u := fmt.Sprintf("/jobs/%s", id) + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BatchURL, + uri: u, + method: http.MethodGet, + optHeader: headers, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +type BatchListJobsOptions struct { + JobStatuses string `url:"jobStatuses,omitempty" header:"-" xml:"-"` + MaxResults int `url:"maxResults,omitempty" header:"-" xml:"-"` + NextToken string `url:"nextToken,omitempty" header:"-" xml:"-"` +} + +type BatchListJobsMember struct { + CreationTime string `xml:"CreationTime,omitempty" header:"-" url:"-"` + Description string `xml:"Description,omitempty" header:"-" url:"-"` + JobId string `xml:"JobId,omitempty" header:"-" url:"-"` + Operation string `xml:"Operation,omitempty" header:"-" url:"-"` + Priority int `xml:"Priority,omitempty" header:"-" url:"-"` + ProgressSummary *BatchProgressSummary `xml:"ProgressSummary,omitempty" header:"-" url:"-"` + Status string `xml:"Status,omitempty" header:"-" url:"-"` + TerminationDate string `xml:"TerminationDate,omitempty" header:"-" url:"-"` +} +type BatchListJobs struct { 
+
+type BatchListJobs struct {
+    Members []BatchListJobsMember `xml:"member,omitempty" header:"-" url:"-"`
+}
+
+type BatchListJobsResult struct {
+    XMLName xml.Name `xml:"ListJobsResult"`
+    Jobs *BatchListJobs `xml:"Jobs,omitempty"`
+    NextToken string `xml:"NextToken,omitempty"`
+}
+
+func (s *BatchService) ListJobs(ctx context.Context, opt *BatchListJobsOptions, headers *BatchRequestHeaders) (*BatchListJobsResult, *Response, error) {
+    var res BatchListJobsResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BatchURL,
+        uri: "/jobs",
+        method: http.MethodGet,
+        optQuery: opt,
+        optHeader: headers,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+type BatchUpdatePriorityOptions struct {
+    JobId string `url:"-" header:"-" xml:"-"`
+    Priority int `url:"priority" header:"-" xml:"-"`
+}
+
+type BatchUpdatePriorityResult struct {
+    XMLName xml.Name `xml:"UpdateJobPriorityResult"`
+    JobId string `xml:"JobId,omitempty"`
+    Priority int `xml:"Priority,omitempty"`
+}
+
+func (s *BatchService) UpdateJobPriority(ctx context.Context, opt *BatchUpdatePriorityOptions, headers *BatchRequestHeaders) (*BatchUpdatePriorityResult, *Response, error) {
+    u := fmt.Sprintf("/jobs/%s/priority", opt.JobId)
+    var res BatchUpdatePriorityResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BatchURL,
+        uri: u,
+        method: http.MethodPost,
+        optQuery: opt,
+        optHeader: headers,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+type BatchUpdateStatusOptions struct {
+    JobId string `header:"-" url:"-" xml:"-"`
+    RequestedJobStatus string `url:"requestedJobStatus" header:"-" xml:"-"`
+    StatusUpdateReason string `url:"statusUpdateReason,omitempty" header:"-" xml:"-"`
+}
+
+type BatchUpdateStatusResult struct {
+    XMLName xml.Name `xml:"UpdateJobStatusResult"`
+    JobId string `xml:"JobId,omitempty"`
+    Status string `xml:"Status,omitempty"`
+    StatusUpdateReason string `xml:"StatusUpdateReason,omitempty"`
+}
+
+func (s *BatchService) UpdateJobStatus(ctx context.Context, opt *BatchUpdateStatusOptions, headers *BatchRequestHeaders) (*BatchUpdateStatusResult, *Response, error) {
+    u := fmt.Sprintf("/jobs/%s/status", opt.JobId)
+    var res BatchUpdateStatusResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BatchURL,
+        uri: u,
+        method: http.MethodPost,
+        optQuery: opt,
+        optHeader: headers,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
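+
+// A minimal usage sketch for the batch-job APIs above (not part of the
+// upstream source; it assumes an initialized *Client named c whose
+// BaseURL.BatchURL is set and suitable BatchRequestHeaders named headers):
+//
+//	res, _, err := c.Batch.DescribeJob(context.Background(), jobID, headers)
+//	if err == nil && res.Job != nil {
+//		fmt.Println(res.Job.Status, res.Job.Priority)
+//	}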
`url:"encoding-type,omitempty"` + Marker string `url:"marker,omitempty"` + MaxKeys int `url:"max-keys,omitempty"` +} + +// Get Bucket请求等同于 List Object请求,可以列出该Bucket下部分或者所有Object,发起该请求需要拥有Read权限。 +// +// https://www.qcloud.com/document/product/436/7734 +func (s *BucketService) Get(ctx context.Context, opt *BucketGetOptions) (*BucketGetResult, *Response, error) { + var res BucketGetResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/", + method: http.MethodGet, + optQuery: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// BucketPutOptions is same to the ACLHeaderOptions +type BucketPutOptions struct { + XCosACL string `header:"x-cos-acl,omitempty" url:"-" xml:"-"` + XCosGrantRead string `header:"x-cos-grant-read,omitempty" url:"-" xml:"-"` + XCosGrantWrite string `header:"x-cos-grant-write,omitempty" url:"-" xml:"-"` + XCosGrantFullControl string `header:"x-cos-grant-full-control,omitempty" url:"-" xml:"-"` + XCosGrantReadACP string `header:"x-cos-grant-read-acp,omitempty" url:"-" xml:"-"` + XCosGrantWriteACP string `header:"x-cos-grant-write-acp,omitempty" url:"-" xml:"-"` + CreateBucketConfiguration *CreateBucketConfiguration `header:"-" url:"-" xml:"-"` +} +type CreateBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + BucketAZConfig string `xml:"BucketAZConfig,omitempty"` +} + +// Put Bucket请求可以在指定账号下创建一个Bucket。 +// +// https://www.qcloud.com/document/product/436/7738 +func (s *BucketService) Put(ctx context.Context, opt *BucketPutOptions) (*Response, error) { + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/", + method: http.MethodPut, + optHeader: opt, + } + if opt != nil && opt.CreateBucketConfiguration != nil { + sendOpt.body = opt.CreateBucketConfiguration + } + resp, err := s.client.send(ctx, &sendOpt) + return resp, err +} + +// Delete Bucket请求可以在指定账号下删除Bucket,删除之前要求Bucket为空。 +// +// https://www.qcloud.com/document/product/436/7732 +func (s *BucketService) Delete(ctx context.Context) (*Response, error) { + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/", + method: http.MethodDelete, + } + resp, err := s.client.send(ctx, &sendOpt) + return resp, err +} + +type BucketHeadOptions struct { + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` +} + +// Head Bucket请求可以确认是否存在该Bucket,是否有权限访问,Head的权限与Read一致。 +// +// 当其存在时,返回 HTTP 状态码200; +// 当无权限时,返回 HTTP 状态码403; +// 当不存在时,返回 HTTP 状态码404。 +// +// https://www.qcloud.com/document/product/436/7735 +func (s *BucketService) Head(ctx context.Context, opt ...*BucketHeadOptions) (*Response, error) { + var hopt *BucketHeadOptions + if len(opt) > 0 { + hopt = opt[0] + } + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/", + method: http.MethodHead, + optHeader: hopt, + } + resp, err := s.client.send(ctx, &sendOpt) + return resp, err +} + +func (s *BucketService) IsExist(ctx context.Context) (bool, error) { + _, err := s.Head(ctx) + if err == nil { + return true, nil + } + if IsNotFoundError(err) { + return false, nil + } + return false, err +} + +// Bucket is the meta info of Bucket +type Bucket struct { + Name string + Region string `xml:"Location,omitempty"` + CreationDate string `xml:",omitempty"` +} + +type BucketGetObjectVersionsOptions struct { + Prefix string `url:"prefix,omitempty" header:"-"` + Delimiter string `url:"delimiter,omitempty" header:"-"` + EncodingType string `url:"encoding-type,omitempty" header:"-"` + KeyMarker string 
`url:"key-marker,omitempty" header:"-"` + VersionIdMarker string `url:"version-id-marker,omitempty" header:"-"` + MaxKeys int `url:"max-keys,omitempty" header:"-"` + XOptionHeader *http.Header `url:"-" header:"-,omitempty" xml:"-"` +} + +type BucketGetObjectVersionsResult struct { + XMLName xml.Name `xml:"ListVersionsResult"` + Name string `xml:"Name,omitempty"` + EncodingType string `xml:"EncodingType,omitempty"` + Prefix string `xml:"Prefix,omitempty"` + KeyMarker string `xml:"KeyMarker,omitempty"` + VersionIdMarker string `xml:"VersionIdMarker,omitempty"` + MaxKeys int `xml:"MaxKeys,omitempty"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated,omitempty"` + NextKeyMarker string `xml:"NextKeyMarker,omitempty"` + NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix,omitempty"` + Version []ListVersionsResultVersion `xml:"Version,omitempty"` + DeleteMarker []ListVersionsResultDeleteMarker `xml:"DeleteMarker,omitempty"` +} + +type ListVersionsResultVersion struct { + Key string `xml:"Key,omitempty"` + VersionId string `xml:"VersionId,omitempty"` + IsLatest bool `xml:"IsLatest,omitempty"` + LastModified string `xml:"LastModified,omitempty"` + ETag string `xml:"ETag,omitempty"` + Size int `xml:"Size,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` +} + +type ListVersionsResultDeleteMarker struct { + Key string `xml:"Key,omitempty"` + VersionId string `xml:"VersionId,omitempty"` + IsLatest bool `xml:"IsLatest,omitempty"` + LastModified string `xml:"LastModified,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` +} + +func (s *BucketService) GetObjectVersions(ctx context.Context, opt *BucketGetObjectVersionsOptions) (*BucketGetObjectVersionsResult, *Response, error) { + var res BucketGetObjectVersionsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/?versions", + method: http.MethodGet, + optQuery: opt, + optHeader: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go new file mode 100644 index 0000000..48192db --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go @@ -0,0 +1,37 @@ +package cos + +import ( + "context" + "encoding/xml" + "net/http" +) + +type BucketPutAccelerateOptions struct { + XMLName xml.Name `xml:"AccelerateConfiguration"` + Status string `xml:"Status,omitempty"` + Type string `xml:"Type,omitempty"` +} +type BucketGetAccelerateResult BucketPutAccelerateOptions + +func (s *BucketService) PutAccelerate(ctx context.Context, opt *BucketPutAccelerateOptions) (*Response, error) { + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/?accelerate", + method: http.MethodPut, + body: opt, + } + resp, err := s.client.doRetry(ctx, sendOpt) + return resp, err +} + +func (s *BucketService) GetAccelerate(ctx context.Context) (*BucketGetAccelerateResult, *Response, error) { + var res BucketGetAccelerateResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/?accelerate", + method: http.MethodGet, + result: &res, + } + resp, err := s.client.doRetry(ctx, sendOpt) + return &res, resp, err +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go new file mode 100644 index 
+// Bucket is the meta info of Bucket
+type Bucket struct {
+    Name string
+    Region string `xml:"Location,omitempty"`
+    CreationDate string `xml:",omitempty"`
+}
+
+type BucketGetObjectVersionsOptions struct {
+    Prefix string `url:"prefix,omitempty" header:"-"`
+    Delimiter string `url:"delimiter,omitempty" header:"-"`
+    EncodingType string `url:"encoding-type,omitempty" header:"-"`
+    KeyMarker string `url:"key-marker,omitempty" header:"-"`
+    VersionIdMarker string `url:"version-id-marker,omitempty" header:"-"`
+    MaxKeys int `url:"max-keys,omitempty" header:"-"`
+    XOptionHeader *http.Header `url:"-" header:"-,omitempty" xml:"-"`
+}
+
+type BucketGetObjectVersionsResult struct {
+    XMLName xml.Name `xml:"ListVersionsResult"`
+    Name string `xml:"Name,omitempty"`
+    EncodingType string `xml:"EncodingType,omitempty"`
+    Prefix string `xml:"Prefix,omitempty"`
+    KeyMarker string `xml:"KeyMarker,omitempty"`
+    VersionIdMarker string `xml:"VersionIdMarker,omitempty"`
+    MaxKeys int `xml:"MaxKeys,omitempty"`
+    Delimiter string `xml:"Delimiter,omitempty"`
+    IsTruncated bool `xml:"IsTruncated,omitempty"`
+    NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
+    NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"`
+    CommonPrefixes []string `xml:"CommonPrefixes>Prefix,omitempty"`
+    Version []ListVersionsResultVersion `xml:"Version,omitempty"`
+    DeleteMarker []ListVersionsResultDeleteMarker `xml:"DeleteMarker,omitempty"`
+}
+
+type ListVersionsResultVersion struct {
+    Key string `xml:"Key,omitempty"`
+    VersionId string `xml:"VersionId,omitempty"`
+    IsLatest bool `xml:"IsLatest,omitempty"`
+    LastModified string `xml:"LastModified,omitempty"`
+    ETag string `xml:"ETag,omitempty"`
+    Size int `xml:"Size,omitempty"`
+    StorageClass string `xml:"StorageClass,omitempty"`
+    Owner *Owner `xml:"Owner,omitempty"`
+}
+
+type ListVersionsResultDeleteMarker struct {
+    Key string `xml:"Key,omitempty"`
+    VersionId string `xml:"VersionId,omitempty"`
+    IsLatest bool `xml:"IsLatest,omitempty"`
+    LastModified string `xml:"LastModified,omitempty"`
+    Owner *Owner `xml:"Owner,omitempty"`
+}
+
+func (s *BucketService) GetObjectVersions(ctx context.Context, opt *BucketGetObjectVersionsOptions) (*BucketGetObjectVersionsResult, *Response, error) {
+    var res BucketGetObjectVersionsResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?versions",
+        method: http.MethodGet,
+        optQuery: opt,
+        optHeader: opt,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go
new file mode 100644
index 0000000..48192db
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_accelerate.go
@@ -0,0 +1,37 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketPutAccelerateOptions struct {
+    XMLName xml.Name `xml:"AccelerateConfiguration"`
+    Status string `xml:"Status,omitempty"`
+    Type string `xml:"Type,omitempty"`
+}
+
+type BucketGetAccelerateResult BucketPutAccelerateOptions
+
+func (s *BucketService) PutAccelerate(ctx context.Context, opt *BucketPutAccelerateOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?accelerate",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetAccelerate(ctx context.Context) (*BucketGetAccelerateResult, *Response, error) {
+    var res BucketGetAccelerateResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?accelerate",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go
new file mode 100644
index 0000000..d0575c8
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_acl.go
@@ -0,0 +1,65 @@
+package cos
+
+import (
+    "context"
+    "net/http"
+)
+
+// BucketGetACLResult is the same as the ACLXml
+type BucketGetACLResult = ACLXml
+
+// GetACL reads the ACL of the bucket via the API. Only the bucket owner may
+// perform this operation.
+//
+// https://www.qcloud.com/document/product/436/7733
+func (s *BucketService) GetACL(ctx context.Context) (*BucketGetACLResult, *Response, error) {
+    var res BucketGetACLResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?acl",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    if err == nil {
+        decodeACL(resp, &res)
+    }
+    return &res, resp, err
+}
+
+// BucketPutACLOptions is the option of PutBucketACL
+type BucketPutACLOptions struct {
+    Header *ACLHeaderOptions `url:"-" xml:"-"`
+    Body *ACLXml `url:"-" header:"-"`
+}
+
+// PutACL writes the ACL of the bucket via the API. The ACL can be passed
+// either through the headers "x-cos-acl", "x-cos-grant-read",
+// "x-cos-grant-write" and "x-cos-grant-full-control", or through the body as
+// XML; only one of the two may be used, otherwise a conflict is returned.
+//
+// Put Bucket ACL is an overwrite operation: the new ACL replaces the
+// existing one. Only the bucket owner may perform this operation.
+//
+// "x-cos-acl": either public-read or private; public-read means the bucket
+// is publicly readable and privately writable, private means it is privately
+// readable and writable.
+//
+// "x-cos-grant-read": the grantee gains read permission on the bucket
+// "x-cos-grant-write": the grantee gains write permission on the bucket
+// "x-cos-grant-full-control": the grantee gains read and write permission on the bucket
+//
+// https://www.qcloud.com/document/product/436/7737
+func (s *BucketService) PutACL(ctx context.Context, opt *BucketPutACLOptions) (*Response, error) {
+    header := opt.Header
+    body := opt.Body
+    if body != nil {
+        header = nil
+    }
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?acl",
+        method: http.MethodPut,
+        body: body,
+        optHeader: header,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
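+
+// A header-based sketch (not part of the upstream source; it assumes an
+// initialized *Client named c). Because header and body are mutually
+// exclusive, Body is left nil here:
+//
+//	opt := &BucketPutACLOptions{
+//		Header: &ACLHeaderOptions{XCosACL: "public-read"},
+//	}
+//	_, err := c.Bucket.PutACL(context.Background(), opt)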
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_cors.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_cors.go
new file mode 100644
index 0000000..6edc6cc
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_cors.go
@@ -0,0 +1,71 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// BucketCORSRule is the rule of BucketCORS
+type BucketCORSRule struct {
+    ID string `xml:"ID,omitempty"`
+    AllowedMethods []string `xml:"AllowedMethod"`
+    AllowedOrigins []string `xml:"AllowedOrigin"`
+    AllowedHeaders []string `xml:"AllowedHeader,omitempty"`
+    MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"`
+    ExposeHeaders []string `xml:"ExposeHeader,omitempty"`
+}
+
+// BucketGetCORSResult is the result of GetBucketCORS
+type BucketGetCORSResult struct {
+    XMLName xml.Name `xml:"CORSConfiguration"`
+    Rules []BucketCORSRule `xml:"CORSRule,omitempty"`
+}
+
+// GetCORS reads the cross-origin access configuration of the bucket.
+//
+// https://www.qcloud.com/document/product/436/8274
+func (s *BucketService) GetCORS(ctx context.Context) (*BucketGetCORSResult, *Response, error) {
+    var res BucketGetCORSResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?cors",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// BucketPutCORSOptions is the option of PutBucketCORS
+type BucketPutCORSOptions struct {
+    XMLName xml.Name `xml:"CORSConfiguration"`
+    Rules []BucketCORSRule `xml:"CORSRule,omitempty"`
+}
+
+// PutCORS sets the cross-origin access configuration of the bucket. The
+// configuration is passed as an XML document of at most 64 KB.
+//
+// https://www.qcloud.com/document/product/436/8279
+func (s *BucketService) PutCORS(ctx context.Context, opt *BucketPutCORSOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?cors",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// DeleteCORS deletes the cross-origin access configuration of the bucket.
+//
+// https://www.qcloud.com/document/product/436/8283
+func (s *BucketService) DeleteCORS(ctx context.Context) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?cors",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
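+
+// A minimal configuration sketch (not part of the upstream source; it
+// assumes an initialized *Client named c and a hypothetical origin):
+//
+//	opt := &BucketPutCORSOptions{
+//		Rules: []BucketCORSRule{{
+//			AllowedOrigins: []string{"https://example.com"},
+//			AllowedMethods: []string{"GET", "HEAD"},
+//			AllowedHeaders: []string{"*"},
+//			MaxAgeSeconds:  600,
+//		}},
+//	}
+//	_, err := c.Bucket.PutCORS(context.Background(), opt)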
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_domain.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_domain.go
new file mode 100644
index 0000000..f00688b
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_domain.go
@@ -0,0 +1,53 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketDomainRule struct {
+    Status string `xml:"Status,omitempty"`
+    Name string `xml:"Name,omitempty"`
+    Type string `xml:"Type,omitempty"`
+    ForcedReplacement string `xml:"ForcedReplacement,omitempty"`
+}
+
+type BucketPutDomainOptions struct {
+    XMLName xml.Name `xml:"DomainConfiguration"`
+    Rules []BucketDomainRule `xml:"DomainRule,omitempty"`
+}
+
+type BucketGetDomainResult BucketPutDomainOptions
+
+func (s *BucketService) PutDomain(ctx context.Context, opt *BucketPutDomainOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?domain",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetDomain(ctx context.Context) (*BucketGetDomainResult, *Response, error) {
+    var res BucketGetDomainResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?domain",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
+
+func (s *BucketService) DeleteDomain(ctx context.Context) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?domain",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_encryption.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_encryption.go
new file mode 100644
index 0000000..d41c143
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_encryption.go
@@ -0,0 +1,51 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketEncryptionConfiguration struct {
+    SSEAlgorithm string `xml:"SSEAlgorithm"`
+}
+
+type BucketPutEncryptionOptions struct {
+    XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
+    Rule *BucketEncryptionConfiguration `xml:"Rule>ApplyServerSideEncryptionByDefault"`
+}
+
+type BucketGetEncryptionResult BucketPutEncryptionOptions
+
+func (s *BucketService) PutEncryption(ctx context.Context, opt *BucketPutEncryptionOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?encryption",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetEncryption(ctx context.Context) (*BucketGetEncryptionResult, *Response, error) {
+    var res BucketGetEncryptionResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?encryption",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
+
+func (s *BucketService) DeleteEncryption(ctx context.Context) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?encryption",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
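+
+// A minimal default-encryption sketch (not part of the upstream source; it
+// assumes an initialized *Client named c; "AES256" is the SSE-COS algorithm
+// name used in the COS documentation):
+//
+//	opt := &BucketPutEncryptionOptions{
+//		Rule: &BucketEncryptionConfiguration{SSEAlgorithm: "AES256"},
+//	}
+//	_, err := c.Bucket.PutEncryption(context.Background(), opt)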
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_intelligenttiering.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_intelligenttiering.go
new file mode 100644
index 0000000..29a370e
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_intelligenttiering.go
@@ -0,0 +1,47 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketIntelligentTieringTransition struct {
+    Days int `xml:"Days,omitempty"`
+    RequestFrequent int `xml:"RequestFrequent,omitempty"`
+}
+
+type BucketPutIntelligentTieringOptions struct {
+    XMLName xml.Name `xml:"IntelligentTieringConfiguration"`
+    Status string `xml:"Status,omitempty"`
+    Transition *BucketIntelligentTieringTransition `xml:"Transition,omitempty"`
+}
+
+type BucketGetIntelligentTieringResult BucketPutIntelligentTieringOptions
+
+func (s *BucketService) PutIntelligentTiering(ctx context.Context, opt *BucketPutIntelligentTieringOptions) (*Response, error) {
+    if opt != nil && opt.Transition != nil {
+        opt.Transition.RequestFrequent = 1
+    }
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?intelligenttiering",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetIntelligentTiering(ctx context.Context) (*BucketGetIntelligentTieringResult, *Response, error) {
+    var res BucketGetIntelligentTieringResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?intelligenttiering",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_inventory.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_inventory.go
new file mode 100644
index 0000000..45a772d
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_inventory.go
@@ -0,0 +1,132 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "fmt"
+    "net/http"
+)
+
+// Notice: the bucket inventory API is still being tested and must not be used yet.
+
+// BucketGetInventoryResult has the same structure as the put options
+type BucketGetInventoryResult BucketPutInventoryOptions
+
+// BucketListInventoryConfiguartion has the same structure as the put options
+type BucketListInventoryConfiguartion BucketPutInventoryOptions
+
+type BucketInventoryFilterPeriod struct {
+    StartTime int64 `xml:"StartTime,omitempty"`
+    EndTime int64 `xml:"EndTime,omitempty"`
+}
+
+// BucketInventoryFilter ...
+type BucketInventoryFilter struct {
+    Prefix string `xml:"Prefix,omitempty"`
+    Period *BucketInventoryFilterPeriod `xml:"Period,omitempty"`
+}
+
+// BucketInventoryOptionalFields ...
+type BucketInventoryOptionalFields struct {
+    BucketInventoryFields []string `xml:"Field,omitempty"`
+}
+
+// BucketInventorySchedule ...
+type BucketInventorySchedule struct {
+    Frequency string `xml:"Frequency"`
+}
+
+// BucketInventoryEncryption ...
+type BucketInventoryEncryption struct {
+    SSECOS string `xml:"SSE-COS"`
+}
+
+// BucketInventoryDestination ...
+type BucketInventoryDestination struct {
+    Bucket string `xml:"Bucket"`
+    AccountId string `xml:"AccountId,omitempty"`
+    Prefix string `xml:"Prefix,omitempty"`
+    Format string `xml:"Format"`
+    Encryption *BucketInventoryEncryption `xml:"Encryption,omitempty"`
+}
+
+// BucketPutInventoryOptions ...
+type BucketPutInventoryOptions struct {
+    XMLName xml.Name `xml:"InventoryConfiguration"`
+    ID string `xml:"Id"`
+    IsEnabled string `xml:"IsEnabled"`
+    IncludedObjectVersions string `xml:"IncludedObjectVersions"`
+    Filter *BucketInventoryFilter `xml:"Filter,omitempty"`
+    OptionalFields *BucketInventoryOptionalFields `xml:"OptionalFields,omitempty"`
+    Schedule *BucketInventorySchedule `xml:"Schedule"`
+    Destination *BucketInventoryDestination `xml:"Destination>COSBucketDestination"`
+}
+
+// ListBucketInventoryConfigResult is the result of ListInventoryConfigurations
+type ListBucketInventoryConfigResult struct {
+    XMLName xml.Name `xml:"ListInventoryConfigurationResult"`
+    InventoryConfigurations []BucketListInventoryConfiguartion `xml:"InventoryConfiguration,omitempty"`
+    IsTruncated bool `xml:"IsTruncated,omitempty"`
+    ContinuationToken string `xml:"ContinuationToken,omitempty"`
+    NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
+}
+
+// PutInventory https://cloud.tencent.com/document/product/436/33707
+func (s *BucketService) PutInventory(ctx context.Context, id string, opt *BucketPutInventoryOptions) (*Response, error) {
+    u := fmt.Sprintf("/?inventory&id=%s", id)
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: u,
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// GetInventory https://cloud.tencent.com/document/product/436/33705
+func (s *BucketService) GetInventory(ctx context.Context, id string) (*BucketGetInventoryResult, *Response, error) {
+    u := fmt.Sprintf("/?inventory&id=%s", id)
+    var res BucketGetInventoryResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: u,
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// DeleteInventory https://cloud.tencent.com/document/product/436/33704
+func (s *BucketService) DeleteInventory(ctx context.Context, id string) (*Response, error) {
+    u := fmt.Sprintf("/?inventory&id=%s", id)
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: u,
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// ListInventoryConfigurations https://cloud.tencent.com/document/product/436/33706
+func (s *BucketService) ListInventoryConfigurations(ctx context.Context, token string) (*ListBucketInventoryConfigResult, *Response, error) {
+    var res ListBucketInventoryConfigResult
+    var u string
+    if token == "" {
+        u = "/?inventory"
+    } else {
+        u = fmt.Sprintf("/?inventory&continuation-token=%s", encodeURIComponent(token))
+    }
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: u,
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_lifecycle.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_lifecycle.go
new file mode 100644
index 0000000..9d5e3eb
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_lifecycle.go
@@ -0,0 +1,127 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketLifecycleAndOperator struct {
+    Prefix string `xml:"Prefix,omitempty" header:"-"`
+    Tag []BucketTaggingTag `xml:"Tag,omitempty" header:"-"`
+}
+
+// BucketLifecycleFilter is the param of BucketLifecycleRule
+type BucketLifecycleFilter struct {
+    Prefix string `xml:"Prefix,omitempty" header:"-"`
+    Tag *BucketTaggingTag `xml:"Tag,omitempty" header:"-"`
+    And *BucketLifecycleAndOperator `xml:"And,omitempty" header:"-"`
+}
+
+// BucketLifecycleExpiration is the param of BucketLifecycleRule
+type BucketLifecycleExpiration struct {
+    Date string `xml:"Date,omitempty" header:"-"`
+    Days int `xml:"Days,omitempty" header:"-"`
+    ExpiredObjectDeleteMarker bool `xml:"ExpiredObjectDeleteMarker,omitempty" header:"-"`
+}
+
+// BucketLifecycleTransition is the param of BucketLifecycleRule
+type BucketLifecycleTransition struct {
+    Date string `xml:"Date,omitempty" header:"-"`
+    Days int `xml:"Days,omitempty" header:"-"`
+    StorageClass string `xml:"StorageClass,omitempty" header:"-"`
+}
+
+type BucketLifecycleNoncurrentVersion struct {
+    NoncurrentDays int `xml:"NoncurrentDays,omitempty" header:"-"`
+    StorageClass string `xml:"StorageClass,omitempty" header:"-"`
+}
+
+// BucketLifecycleAbortIncompleteMultipartUpload is the param of BucketLifecycleRule
+type BucketLifecycleAbortIncompleteMultipartUpload struct {
+    DaysAfterInitiation int `xml:"DaysAfterInitiation,omitempty" header:"-"`
+}
+
+// BucketLifecycleRule is the rule of BucketLifecycle
+type BucketLifecycleRule struct {
+    ID string `xml:"ID,omitempty" header:"-"`
+    Status string `xml:"Status,omitempty" header:"-"`
+    Filter *BucketLifecycleFilter `xml:"Filter,omitempty" header:"-"`
+    Transition []BucketLifecycleTransition `xml:"Transition,omitempty" header:"-"`
+    Expiration *BucketLifecycleExpiration `xml:"Expiration,omitempty" header:"-"`
+    AbortIncompleteMultipartUpload *BucketLifecycleAbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" header:"-"`
+    NoncurrentVersionTransition []BucketLifecycleNoncurrentVersion `xml:"NoncurrentVersionTransition,omitempty" header:"-"`
+    NoncurrentVersionExpiration *BucketLifecycleNoncurrentVersion `xml:"NoncurrentVersionExpiration,omitempty" header:"-"`
+}
+
+// BucketGetLifecycleResult is the result of BucketGetLifecycle
+type BucketGetLifecycleResult struct {
+    XMLName xml.Name `xml:"LifecycleConfiguration" header:"-"`
+    Rules []BucketLifecycleRule `xml:"Rule,omitempty" header:"-"`
+}
+
+type BucketGetLifecycleOptions struct {
+    XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"`
+}
+
+// GetLifecycle reads the lifecycle configuration of the bucket. If no
+// configuration exists, 404 Not Found is returned.
+// https://www.qcloud.com/document/product/436/8278
+func (s *BucketService) GetLifecycle(ctx context.Context, opt ...*BucketGetLifecycleOptions) (*BucketGetLifecycleResult, *Response, error) {
+    var optHeader *BucketGetLifecycleOptions
+    if len(opt) > 0 {
+        optHeader = opt[0]
+    }
+    var res BucketGetLifecycleResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?lifecycle",
+        method: http.MethodGet,
+        optHeader: optHeader,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// BucketPutLifecycleOptions is the option of PutBucketLifecycle
+type BucketPutLifecycleOptions struct {
+    XMLName xml.Name `xml:"LifecycleConfiguration" header:"-"`
+    Rules []BucketLifecycleRule `xml:"Rule,omitempty" header:"-"`
+    XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"`
+}
+
+// PutLifecycle sets the lifecycle configuration of the bucket, which can be
+// used to manage data over its lifetime and delete it periodically. This is
+// an overwrite operation: the new configuration replaces the previous one.
+// Lifecycle management applies to both files and folders.
+// https://www.qcloud.com/document/product/436/8280
+func (s *BucketService) PutLifecycle(ctx context.Context, opt *BucketPutLifecycleOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?lifecycle",
+        method: http.MethodPut,
+        optHeader: opt,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+type BucketDeleteLifecycleOptions struct {
+    XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"`
+}
+
+// DeleteLifecycle deletes the lifecycle configuration of the bucket.
+// https://www.qcloud.com/document/product/436/8284
+func (s *BucketService) DeleteLifecycle(ctx context.Context, opt ...*BucketDeleteLifecycleOptions) (*Response, error) {
+    var optHeader *BucketDeleteLifecycleOptions
+    if len(opt) > 0 {
+        optHeader = opt[0]
+    }
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?lifecycle",
+        optHeader: optHeader,
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
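+
+// A sketch of a rule that transitions objects under "logs/" to STANDARD_IA
+// after 30 days and expires them after 365 days (not part of the upstream
+// source; it assumes an initialized *Client named c):
+//
+//	opt := &BucketPutLifecycleOptions{
+//		Rules: []BucketLifecycleRule{{
+//			ID:         "expire-logs",
+//			Status:     "Enabled",
+//			Filter:     &BucketLifecycleFilter{Prefix: "logs/"},
+//			Transition: []BucketLifecycleTransition{{Days: 30, StorageClass: "STANDARD_IA"}},
+//			Expiration: &BucketLifecycleExpiration{Days: 365},
+//		}},
+//	}
+//	_, err := c.Bucket.PutLifecycle(context.Background(), opt)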
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_location.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_location.go
new file mode 100644
index 0000000..dd4b5a5
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_location.go
@@ -0,0 +1,28 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// BucketGetLocationResult is the result of BucketGetLocation
+type BucketGetLocationResult struct {
+    XMLName xml.Name `xml:"LocationConstraint"`
+    Location string `xml:",chardata"`
+}
+
+// GetLocation returns the region of the bucket. Only the bucket owner may
+// read this information.
+//
+// https://www.qcloud.com/document/product/436/8275
+func (s *BucketService) GetLocation(ctx context.Context) (*BucketGetLocationResult, *Response, error) {
+    var res BucketGetLocationResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?location",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_logging.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_logging.go
new file mode 100644
index 0000000..88a1a95
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_logging.go
@@ -0,0 +1,50 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// Notice: the bucket logging API is still being tested and must not be used yet.
+
+// BucketLoggingEnabled is the main struct of logging
+type BucketLoggingEnabled struct {
+    TargetBucket string `xml:"TargetBucket"`
+    TargetPrefix string `xml:"TargetPrefix"`
+}
+
+// BucketPutLoggingOptions is the options of PutBucketLogging
+type BucketPutLoggingOptions struct {
+    XMLName xml.Name `xml:"BucketLoggingStatus"`
+    LoggingEnabled *BucketLoggingEnabled `xml:"LoggingEnabled,omitempty"`
+}
+
+// BucketGetLoggingResult is the result of GetBucketLogging
+type BucketGetLoggingResult BucketPutLoggingOptions
+
+// PutLogging https://cloud.tencent.com/document/product/436/17054
+func (s *BucketService) PutLogging(ctx context.Context, opt *BucketPutLoggingOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?logging",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// GetLogging https://cloud.tencent.com/document/product/436/17053
+func (s *BucketService) GetLogging(ctx context.Context) (*BucketGetLoggingResult, *Response, error) {
+    var res BucketGetLoggingResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?logging",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_origin.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_origin.go
new file mode 100644
index 0000000..c6cc5fd
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_origin.go
@@ -0,0 +1,91 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketPutOriginOptions struct {
+    XMLName xml.Name `xml:"OriginConfiguration"`
+    Rule []BucketOriginRule `xml:"OriginRule"`
+}
+
+type BucketOriginRule struct {
+    RulePriority int `xml:"RulePriority,omitempty"`
+    OriginType string `xml:"OriginType,omitempty"`
+    OriginCondition *BucketOriginCondition `xml:"OriginCondition,omitempty"`
+    OriginParameter *BucketOriginParameter `xml:"OriginParameter,omitempty"`
+    OriginInfo *BucketOriginInfo `xml:"OriginInfo,omitempty"`
+}
+
+type BucketOriginCondition struct {
+    HTTPStatusCode string `xml:"HTTPStatusCode,omitempty"`
+    Prefix string `xml:"Prefix,omitempty"`
+}
+
+type BucketOriginParameter struct {
+    Protocol string `xml:"Protocol,omitempty"`
+    FollowQueryString bool `xml:"FollowQueryString,omitempty"`
+    HttpHeader *BucketOriginHttpHeader `xml:"HttpHeader,omitempty"`
+    FollowRedirection bool `xml:"FollowRedirection,omitempty"`
+    HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"`
+    CopyOriginData bool `xml:"CopyOriginData,omitempty"`
+}
+
+type BucketOriginHttpHeader struct {
+    // FollowAllHeaders is not supported yet
+    // FollowAllHeaders bool `xml:"FollowAllHeaders,omitempty"`
+    NewHttpHeaders []OriginHttpHeader `xml:"NewHttpHeaders>Header,omitempty"`
+    FollowHttpHeaders []OriginHttpHeader `xml:"FollowHttpHeaders>Header,omitempty"`
+}
+
+type OriginHttpHeader struct {
+    Key string `xml:"Key,omitempty"`
+    Value string `xml:"Value,omitempty"`
+}
+
+type BucketOriginInfo struct {
+    HostInfo string `xml:"HostInfo>HostName,omitempty"`
+    FileInfo *BucketOriginFileInfo `xml:"FileInfo,omitempty"`
+}
+
+type BucketOriginFileInfo struct {
+    PrefixDirective bool `xml:"PrefixDirective,omitempty"`
+    Prefix string `xml:"Prefix,omitempty"`
+    Suffix string `xml:"Suffix,omitempty"`
+}
+
+type BucketGetOriginResult BucketPutOriginOptions
+
+func (s *BucketService) PutOrigin(ctx context.Context, opt *BucketPutOriginOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?origin",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetOrigin(ctx context.Context) (*BucketGetOriginResult, *Response, error) {
+    var res BucketGetOriginResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?origin",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
+
+func (s *BucketService) DeleteOrigin(ctx context.Context) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?origin",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_part.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_part.go
new file mode 100644
index 0000000..e77b420
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_part.go
@@ -0,0 +1,57 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// ListMultipartUploadsResult is the result of ListMultipartUploads
+type ListMultipartUploadsResult struct {
+    XMLName xml.Name `xml:"ListMultipartUploadsResult"`
+    Bucket string `xml:"Bucket"`
+    EncodingType string `xml:"Encoding-Type"`
+    KeyMarker string
+    UploadIDMarker string `xml:"UploadIdMarker"`
+    NextKeyMarker string
+    NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+    MaxUploads int
+    IsTruncated bool
+    Uploads []struct {
+        Key string
+        UploadID string `xml:"UploadId"`
+        StorageClass string
+        Initiator *Initiator
+        Owner *Owner
+        Initiated string
+    } `xml:"Upload,omitempty"`
+    Prefix string
+    Delimiter string `xml:"delimiter,omitempty"`
+    CommonPrefixes []string `xml:"CommonPrefixs>Prefix,omitempty"`
+}
+
+// ListMultipartUploadsOptions is the option of ListMultipartUploads
+type ListMultipartUploadsOptions struct {
+    Delimiter string `url:"delimiter,omitempty"`
+    EncodingType string `url:"encoding-type,omitempty"`
+    Prefix string `url:"prefix,omitempty"`
+    MaxUploads int `url:"max-uploads,omitempty"`
+    KeyMarker string `url:"key-marker,omitempty"`
+    UploadIDMarker string `url:"upload-id-marker,omitempty"`
+}
+
+// ListMultipartUploads lists in-progress multipart uploads. At most 1000
+// in-progress uploads are returned per request.
+//
+// https://www.qcloud.com/document/product/436/7736
+func (s *BucketService) ListMultipartUploads(ctx context.Context, opt *ListMultipartUploadsOptions) (*ListMultipartUploadsResult, *Response, error) {
+    var res ListMultipartUploadsResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?uploads",
+        method: http.MethodGet,
+        result: &res,
+        optQuery: opt,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
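+
+// Because at most 1000 uploads are returned per call, a caller pages with
+// the two markers; a sketch (not part of the upstream source; it assumes an
+// initialized *Client named c):
+//
+//	opt := &ListMultipartUploadsOptions{}
+//	for {
+//		res, _, err := c.Bucket.ListMultipartUploads(context.Background(), opt)
+//		if err != nil || !res.IsTruncated {
+//			break
+//		}
+//		opt.KeyMarker = res.NextKeyMarker
+//		opt.UploadIDMarker = res.NextUploadIDMarker
+//	}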
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_policy.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_policy.go
new file mode 100644
index 0000000..5ec4f89
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_policy.go
@@ -0,0 +1,71 @@
+package cos
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "net/http"
+    "strings"
+)
+
+type BucketStatement struct {
+    Principal map[string][]string `json:"principal,omitempty"`
+    Action []string `json:"action,omitempty"`
+    Effect string `json:"effect,omitempty"`
+    Resource []string `json:"resource,omitempty"`
+    Condition map[string]map[string]interface{} `json:"condition,omitempty"`
+}
+
+type BucketPutPolicyOptions struct {
+    Statement []BucketStatement `json:"statement,omitempty"`
+    Version string `json:"version,omitempty"`
+    Principal map[string][]string `json:"principal,omitempty"`
+}
+
+type BucketGetPolicyResult BucketPutPolicyOptions
+
+func (s *BucketService) PutPolicy(ctx context.Context, opt *BucketPutPolicyOptions) (*Response, error) {
+    var f *strings.Reader
+    if opt != nil {
+        bs, err := json.Marshal(opt)
+        if err != nil {
+            return nil, err
+        }
+        body := string(bs)
+        f = strings.NewReader(body)
+    }
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?policy",
+        method: http.MethodPut,
+        body: f,
+    }
+    resp, err := s.client.send(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetPolicy(ctx context.Context) (*BucketGetPolicyResult, *Response, error) {
+    var bs bytes.Buffer
+    var res BucketGetPolicyResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?policy",
+        method: http.MethodGet,
+        result: &bs,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    if err == nil {
+        err = json.Unmarshal(bs.Bytes(), &res)
+    }
+    return &res, resp, err
+}
+
+func (s *BucketService) DeletePolicy(ctx context.Context) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?policy",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
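+
+// A minimal read-only policy sketch (not part of the upstream source; it
+// assumes an initialized *Client named c, and the qcs strings below are
+// placeholders, not real identifiers):
+//
+//	opt := &BucketPutPolicyOptions{
+//		Version: "2.0",
+//		Statement: []BucketStatement{{
+//			Effect:    "allow",
+//			Principal: map[string][]string{"qcs": {"qcs::cam::uin/100000000001:uin/100000000001"}},
+//			Action:    []string{"name/cos:GetObject"},
+//			Resource:  []string{"qcs::cos:ap-guangzhou:uid/1250000000:examplebucket-1250000000/*"},
+//		}},
+//	}
+//	_, err := c.Bucket.PutPolicy(context.Background(), opt)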
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_referer.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_referer.go
new file mode 100644
index 0000000..cad5a73
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_referer.go
@@ -0,0 +1,40 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type BucketPutRefererOptions struct {
+    XMLName xml.Name `xml:"RefererConfiguration"`
+    Status string `xml:"Status"`
+    RefererType string `xml:"RefererType"`
+    DomainList []string `xml:"DomainList>Domain"`
+    EmptyReferConfiguration string `xml:"EmptyReferConfiguration,omitempty"`
+}
+
+type BucketGetRefererResult BucketPutRefererOptions
+
+func (s *BucketService) PutReferer(ctx context.Context, opt *BucketPutRefererOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?referer",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetReferer(ctx context.Context) (*BucketGetRefererResult, *Response, error) {
+    var res BucketGetRefererResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?referer",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_replication.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_replication.go
new file mode 100644
index 0000000..9b2d07b
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_replication.go
@@ -0,0 +1,69 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// ReplicationDestination is the sub struct of BucketReplicationRule
+type ReplicationDestination struct {
+    Bucket string `xml:"Bucket"`
+    StorageClass string `xml:"StorageClass,omitempty"`
+}
+
+// BucketReplicationRule is the main param of replication
+type BucketReplicationRule struct {
+    ID string `xml:"ID,omitempty"`
+    Status string `xml:"Status"`
+    Prefix string `xml:"Prefix"`
+    Destination *ReplicationDestination `xml:"Destination"`
+}
+
+// PutBucketReplicationOptions is the options of PutBucketReplication
+type PutBucketReplicationOptions struct {
+    XMLName xml.Name `xml:"ReplicationConfiguration"`
+    Role string `xml:"Role"`
+    Rule []BucketReplicationRule `xml:"Rule"`
+}
+
+// GetBucketReplicationResult is the result of GetBucketReplication
+type GetBucketReplicationResult PutBucketReplicationOptions
+
+// PutBucketReplication https://cloud.tencent.com/document/product/436/19223
+func (s *BucketService) PutBucketReplication(ctx context.Context, opt *PutBucketReplicationOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?replication",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// GetBucketReplication https://cloud.tencent.com/document/product/436/19222
+func (s *BucketService) GetBucketReplication(ctx context.Context) (*GetBucketReplicationResult, *Response, error) {
+    var res GetBucketReplicationResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?replication",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// DeleteBucketReplication https://cloud.tencent.com/document/product/436/19221
+func (s *BucketService) DeleteBucketReplication(ctx context.Context) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?replication",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_tagging.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_tagging.go
new file mode 100644
index 0000000..e223ee1
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_tagging.go
@@ -0,0 +1,69 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// BucketTaggingTag is the tag of BucketTagging
+type BucketTaggingTag struct {
+    Key string
+    Value string
+}
+
+// BucketGetTaggingResult is the result of BucketGetTagging
+type BucketGetTaggingResult struct {
+    XMLName xml.Name `xml:"Tagging"`
+    TagSet []BucketTaggingTag `xml:"TagSet>Tag,omitempty"`
+}
+
+// GetTagging returns the tags of the bucket.
+//
+// https://www.qcloud.com/document/product/436/8277
+func (s *BucketService) GetTagging(ctx context.Context) (*BucketGetTaggingResult, *Response, error) {
+    var res BucketGetTaggingResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?tagging",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// BucketPutTaggingOptions is the option of BucketPutTagging
+type BucketPutTaggingOptions struct {
+    XMLName xml.Name `xml:"Tagging"`
+    TagSet []BucketTaggingTag `xml:"TagSet>Tag,omitempty"`
+}
+
+// PutTagging sets tags on the bucket, which can be used to organize and
+// manage related buckets.
+//
+// If the request sets the same key with different values, 400 is returned;
+// on success, 204 is returned.
+//
+// https://www.qcloud.com/document/product/436/8281
+func (s *BucketService) PutTagging(ctx context.Context, opt *BucketPutTaggingOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?tagging",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// DeleteTagging deletes the tags of the bucket.
+//
+// https://www.qcloud.com/document/product/436/8286
+func (s *BucketService) DeleteTagging(ctx context.Context) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?tagging",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_version.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_version.go
new file mode 100644
index 0000000..c540af8
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_version.go
@@ -0,0 +1,42 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+// BucketPutVersionOptions is the options of PutBucketVersioning
+type BucketPutVersionOptions struct {
+    XMLName xml.Name `xml:"VersioningConfiguration"`
+    Status string `xml:"Status"`
+}
+
+// BucketGetVersionResult is the result of GetBucketVersioning
+type BucketGetVersionResult BucketPutVersionOptions
+
+// PutVersioning https://cloud.tencent.com/document/product/436/19889
+// Status is either "Suspended" or "Enabled"
+func (s *BucketService) PutVersioning(ctx context.Context, opt *BucketPutVersionOptions) (*Response, error) {
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?versioning",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return resp, err
+}
+
+// GetVersioning https://cloud.tencent.com/document/product/436/19888
+func (s *BucketService) GetVersioning(ctx context.Context) (*BucketGetVersionResult, *Response, error) {
+    var res BucketGetVersionResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?versioning",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, &sendOpt)
+    return &res, resp, err
+}
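+
+// Enabling versioning is a one-field configuration; a sketch (not part of
+// the upstream source; it assumes an initialized *Client named c):
+//
+//	_, err := c.Bucket.PutVersioning(context.Background(),
+//		&BucketPutVersionOptions{Status: "Enabled"})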
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_website.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_website.go
new file mode 100644
index 0000000..f97f4d6
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/bucket_website.go
@@ -0,0 +1,71 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "net/http"
+)
+
+type WebsiteRoutingRule struct {
+    ConditionErrorCode string `xml:"Condition>HttpErrorCodeReturnedEquals,omitempty"`
+    ConditionPrefix string `xml:"Condition>KeyPrefixEquals,omitempty"`
+
+    RedirectProtocol string `xml:"Redirect>Protocol,omitempty"`
+    RedirectReplaceKey string `xml:"Redirect>ReplaceKeyWith,omitempty"`
+    RedirectReplaceKeyPrefix string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
+}
+
+type WebsiteRoutingRules struct {
+    Rules []WebsiteRoutingRule `xml:"RoutingRule,omitempty"`
+}
+
+type ErrorDocument struct {
+    Key string `xml:"Key,omitempty"`
+}
+
+type RedirectRequestsProtocol struct {
+    Protocol string `xml:"Protocol,omitempty"`
+}
+
+type BucketPutWebsiteOptions struct {
+    XMLName xml.Name `xml:"WebsiteConfiguration"`
+    Index string `xml:"IndexDocument>Suffix"`
+    RedirectProtocol *RedirectRequestsProtocol `xml:"RedirectAllRequestsTo,omitempty"`
+    Error *ErrorDocument `xml:"ErrorDocument,omitempty"`
+    RoutingRules *WebsiteRoutingRules `xml:"RoutingRules,omitempty"`
+}
+
+type BucketGetWebsiteResult BucketPutWebsiteOptions
+
+func (s *BucketService) PutWebsite(ctx context.Context, opt *BucketPutWebsiteOptions) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?website",
+        method: http.MethodPut,
+        body: opt,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
+
+func (s *BucketService) GetWebsite(ctx context.Context) (*BucketGetWebsiteResult, *Response, error) {
+    var res BucketGetWebsiteResult
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?website",
+        method: http.MethodGet,
+        result: &res,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return &res, resp, err
+}
+
+func (s *BucketService) DeleteWebsite(ctx context.Context) (*Response, error) {
+    sendOpt := &sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/?website",
+        method: http.MethodDelete,
+    }
+    resp, err := s.client.doRetry(ctx, sendOpt)
+    return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/ci.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci.go
new file mode 100644
index 0000000..26e690a
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci.go
@@ -0,0 +1,1834 @@
+package cos
+
+import (
+    "bytes"
+    "context"
+    "encoding/base64"
+    "encoding/json"
+    "encoding/xml"
+    "errors"
+    "fmt"
+    "hash/crc64"
+    "io"
+    "net/http"
+    "net/url"
+    "os"
+    "strconv"
+)
+
+type CIService service
+
+type PicOperations struct {
+    IsPicInfo int `json:"is_pic_info,omitempty"`
+    Rules []PicOperationsRules `json:"rules,omitempty"`
+}
+
+type PicOperationsRules struct {
+    Bucket string `json:"bucket,omitempty"`
+    FileId string `json:"fileid"`
+    Rule string `json:"rule"`
+}
+
+func EncodePicOperations(pic *PicOperations) string {
+    if pic == nil {
+        return ""
+    }
+    bs, err := json.Marshal(pic)
+    if err != nil {
+        return ""
+    }
+    return string(bs)
+}
+
+type ImageProcessResult struct {
+    XMLName xml.Name `xml:"UploadResult"`
+    OriginalInfo *PicOriginalInfo `xml:"OriginalInfo,omitempty"`
+    ProcessResults []PicProcessObject `xml:"ProcessResults>Object,omitempty"`
+}
+
+type PicOriginalInfo struct {
+    Key string `xml:"Key,omitempty"`
+    Location string `xml:"Location,omitempty"`
+    ImageInfo *PicImageInfo `xml:"ImageInfo,omitempty"`
+    ETag string `xml:"ETag,omitempty"`
+}
+
+type PicImageInfo struct {
+    Format string `xml:"Format,omitempty"`
+    Width int `xml:"Width,omitempty"`
+    Height int `xml:"Height,omitempty"`
+    Quality int `xml:"Quality,omitempty"`
+    Ave string `xml:"Ave,omitempty"`
+    Orientation int `xml:"Orientation,omitempty"`
+}
+
+type PicProcessObject struct {
+    Key string `xml:"Key,omitempty"`
+    Location string `xml:"Location,omitempty"`
+    Format string `xml:"Format,omitempty"`
+    Width int `xml:"Width,omitempty"`
+    Height int `xml:"Height,omitempty"`
+    Size int `xml:"Size,omitempty"`
+    Quality int `xml:"Quality,omitempty"`
+    ETag string `xml:"ETag,omitempty"`
+    WatermarkStatus int `xml:"WatermarkStatus,omitempty"`
+    CodeStatus int `xml:"CodeStatus,omitempty"`
+    QRcodeInfo []QRcodeInfo `xml:"QRcodeInfo,omitempty"`
+}
+
+type QRcodeInfo struct {
+    CodeUrl string `xml:"CodeUrl,omitempty"`
+    CodeLocation *CodeLocation `xml:"CodeLocation,omitempty"`
+}
+
+type CodeLocation struct {
+    Point []string `xml:"Point,omitempty"`
+}
+
+type picOperationsHeader struct {
+    PicOperations string `header:"Pic-Operations" xml:"-" url:"-"`
+}
+
+type ImageProcessOptions = PicOperations
+
+// ImageProcess performs cloud-side data processing.
+// https://cloud.tencent.com/document/product/460/18147
+func (s *CIService) ImageProcess(ctx context.Context, name string, opt *ImageProcessOptions) (*ImageProcessResult, *Response, error) {
+    header := &picOperationsHeader{
+        PicOperations: EncodePicOperations(opt),
+    }
+    var res ImageProcessResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.BucketURL,
+        uri: "/" + encodeURIComponent(name) + "?image_process",
+        method: http.MethodPost,
+        optHeader: header,
+        result: &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
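+
+// A sketch that asks CI to scale an existing object and store the result
+// under a new key (not part of the upstream source; it assumes an
+// initialized *Client named c; the rule string follows the imageMogr2
+// syntax from the CI documentation):
+//
+//	opt := &ImageProcessOptions{
+//		IsPicInfo: 1,
+//		Rules: []PicOperationsRules{{
+//			FileId: "thumbnail.jpg",
+//			Rule:   "imageMogr2/thumbnail/!50p",
+//		}},
+//	}
+//	res, _, err := c.CI.ImageProcess(context.Background(), "picture.jpg", opt)
+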
`xml:",omitempty"` + CallbackType int `xml:",omitempty"` + BizType string `xml:",omitempty"` + DetectContent int `xml:",omitempty"` +} + +// PutVideoAuditingJobSnapshot is the snapshot config of VideoAuditingJobConf +type PutVideoAuditingJobSnapshot struct { + Mode string `xml:",omitempty"` + Count int `xml:",omitempty"` + TimeInterval float32 `xml:",omitempty"` +} + +// PutVideoAuditingJobResult is the result of PutVideoAuditingJob +type PutVideoAuditingJobResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail struct { + JobId string `xml:"JobId,omitempty"` + State string `xml:"State,omitempty"` + CreationTime string `xml:"CreationTime,omitempty"` + Object string `xml:"Object,omitempty"` + Url string `xml:"Url,omitempty"` + } `xml:"JobsDetail,omitempty"` + RequestId string `xml:"RequestId,omitempty"` +} + +// 视频审核-创建任务 https://cloud.tencent.com/document/product/460/46427 +func (s *CIService) PutVideoAuditingJob(ctx context.Context, opt *PutVideoAuditingJobOptions) (*PutVideoAuditingJobResult, *Response, error) { + var res PutVideoAuditingJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/video/auditing", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// GetVideoAuditingJobResult is the result of GetVideoAuditingJob +type GetVideoAuditingJobResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail *AuditingJobDetail `xml:",omitempty"` + RequestId string `xml:",omitempty"` +} + +// AuditingJobDetail is the detail of GetVideoAuditingJobResult +type AuditingJobDetail struct { + Code string `xml:",omitempty"` + Message string `xml:",omitempty"` + JobId string `xml:",omitempty"` + State string `xml:",omitempty"` + CreationTime string `xml:",omitempty"` + Object string `xml:",omitempty"` + Url string `xml:",omitempty"` + DataId string `xml:",omitempty"` + SnapshotCount string `xml:",omitempty"` + Label string `xml:",omitempty"` + Result int `xml:",omitempty"` + PornInfo *RecognitionInfo `xml:",omitempty"` + TerrorismInfo *RecognitionInfo `xml:",omitempty"` + PoliticsInfo *RecognitionInfo `xml:",omitempty"` + AdsInfo *RecognitionInfo `xml:",omitempty"` + TeenagerInfo *RecognitionInfo `xml:",omitempty"` + Snapshot []GetVideoAuditingJobSnapshot `xml:",omitempty"` + AudioSection []AudioSectionResult `xml:",omitempty"` + UserInfo *UserExtraInfo `xml:",omitempty"` + Type string `xml:",omitempty"` +} + +// GetVideoAuditingJobSnapshot is the snapshot result of AuditingJobDetail +type GetVideoAuditingJobSnapshot struct { + Url string `xml:",omitempty"` + Text string `xml:",omitempty"` + SnapshotTime int `xml:",omitempty"` + Label string `xml:",omitempty"` + Result int `xml:",omitempty"` + PornInfo *RecognitionInfo `xml:",omitempty"` + TerrorismInfo *RecognitionInfo `xml:",omitempty"` + PoliticsInfo *RecognitionInfo `xml:",omitempty"` + AdsInfo *RecognitionInfo `xml:",omitempty"` + TeenagerInfo *RecognitionInfo `xml:",omitempty"` +} + +// AudioSectionResult is the audio section result of AuditingJobDetail/AudioAuditingJobDetail +type AudioSectionResult struct { + Url string `xml:",omitempty"` + Text string `xml:",omitempty"` + OffsetTime int `xml:",omitempty"` + Duration int `xml:",omitempty"` + Label string `xml:",omitempty"` + Result int `xml:",omitempty"` + PornInfo *RecognitionInfo `xml:",omitempty"` + TerrorismInfo *RecognitionInfo `xml:",omitempty"` + PoliticsInfo *RecognitionInfo `xml:",omitempty"` + AdsInfo *RecognitionInfo `xml:",omitempty"` +} + +// 视频审核-查询任务 
+// GetVideoAuditingJob queries a video auditing job. https://cloud.tencent.com/document/product/460/46926
+func (s *CIService) GetVideoAuditingJob(ctx context.Context, jobid string) (*GetVideoAuditingJobResult, *Response, error) {
+ var res GetVideoAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/video/auditing/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// PostVideoAuditingCancelJob cancels a live-stream video auditing job.
+func (s *CIService) PostVideoAuditingCancelJob(ctx context.Context, jobid string) (*PutVideoAuditingJobResult, *Response, error) {
+ var res PutVideoAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/video/cancel_auditing/" + jobid,
+  method: http.MethodPost,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// PutAudioAuditingJobOptions is the option of PutAudioAuditingJob
+type PutAudioAuditingJobOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ InputObject string `xml:"Input>Object,omitempty"`
+ InputUrl string `xml:"Input>Url,omitempty"`
+ InputDataId string `xml:"Input>DataId,omitempty"`
+ InputUserInfo *UserExtraInfo `xml:"Input>UserInfo,omitempty"`
+ Conf *AudioAuditingJobConf `xml:"Conf"`
+}
+
+// AudioAuditingJobConf is the config of PutAudioAuditingJobOptions
+type AudioAuditingJobConf struct {
+ DetectType string `xml:",omitempty"`
+ Callback string `xml:",omitempty"`
+ CallbackVersion string `xml:",omitempty"`
+ BizType string `xml:",omitempty"`
+}
+
+// PutAudioAuditingJobResult is the result of PutAudioAuditingJob
+type PutAudioAuditingJobResult PutVideoAuditingJobResult
+
+// PutAudioAuditingJob creates an audio auditing job. https://cloud.tencent.com/document/product/460/53395
+func (s *CIService) PutAudioAuditingJob(ctx context.Context, opt *PutAudioAuditingJobOptions) (*PutAudioAuditingJobResult, *Response, error) {
+ var res PutAudioAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/audio/auditing",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// GetAudioAuditingJobResult is the result of GetAudioAuditingJob
+type GetAudioAuditingJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *AudioAuditingJobDetail `xml:",omitempty"`
+ RequestId string `xml:",omitempty"`
+}
+
+// AudioAuditingJobDetail is the detail of GetAudioAuditingJobResult
+type AudioAuditingJobDetail struct {
+ Code string `xml:",omitempty"`
+ Message string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ State string `xml:",omitempty"`
+ CreationTime string `xml:",omitempty"`
+ Object string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ DataId string `xml:",omitempty"`
+ AudioText string `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Result int `xml:",omitempty"`
+ PornInfo *RecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *RecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *RecognitionInfo `xml:",omitempty"`
+ AdsInfo *RecognitionInfo `xml:",omitempty"`
+ Section []AudioSectionResult `xml:",omitempty"`
+ UserInfo *UserExtraInfo `xml:",omitempty"`
+}
+
+// GetAudioAuditingJob queries an audio auditing job. https://cloud.tencent.com/document/product/460/53396
+func (s *CIService) GetAudioAuditingJob(ctx context.Context, jobid string) (*GetAudioAuditingJobResult, *Response, error) {
+ var res GetAudioAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/audio/auditing/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
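+
+// Usage sketch (editorial example, not part of the vendored SDK): audio jobs
+// follow the same create-then-query pattern; names below are placeholders.
+//
+//  opt := &cos.PutAudioAuditingJobOptions{
+//   InputObject: "voice.mp3",
+//   Conf:        &cos.AudioAuditingJobConf{DetectType: "porn,ads"},
+//  }
+//  create, _, err := client.CI.PutAudioAuditingJob(context.Background(), opt)
+//  if err == nil {
+//   res, _, _ := client.CI.GetAudioAuditingJob(context.Background(), create.JobsDetail.JobId)
+//   _ = res
+//  }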
+
+// PutTextAuditingJobOptions is the option of PutTextAuditingJob
+type PutTextAuditingJobOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ InputObject string `xml:"Input>Object,omitempty"`
+ InputUrl string `xml:"Input>Url,omitempty"`
+ InputContent string `xml:"Input>Content,omitempty"`
+ InputDataId string `xml:"Input>DataId,omitempty"`
+ InputUserInfo *UserExtraInfo `xml:"Input>UserInfo,omitempty"`
+ Conf *TextAuditingJobConf `xml:"Conf"`
+}
+
+// TextAuditingJobConf is the config of PutTextAuditingJobOptions
+type TextAuditingJobConf struct {
+ DetectType string `xml:",omitempty"`
+ Callback string `xml:",omitempty"`
+ CallbackVersion string `xml:",omitempty"`
+ BizType string `xml:",omitempty"`
+}
+
+// PutTextAuditingJobResult is the result of PutTextAuditingJob
+type PutTextAuditingJobResult GetTextAuditingJobResult
+
+// PutTextAuditingJob creates a text auditing job. https://cloud.tencent.com/document/product/436/56289
+func (s *CIService) PutTextAuditingJob(ctx context.Context, opt *PutTextAuditingJobOptions) (*PutTextAuditingJobResult, *Response, error) {
+ var res PutTextAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/text/auditing",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// GetTextAuditingJobResult is the result of GetTextAuditingJob
+type GetTextAuditingJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *TextAuditingJobDetail `xml:",omitempty"`
+ RequestId string `xml:",omitempty"`
+}
+
+// TextAuditingJobDetail is the detail of GetTextAuditingJobResult
+type TextAuditingJobDetail struct {
+ Code string `xml:",omitempty"`
+ Message string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ State string `xml:",omitempty"`
+ CreationTime string `xml:",omitempty"`
+ Object string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ DataId string `xml:",omitempty"`
+ Content string `xml:",omitempty"`
+ SectionCount int `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Result int `xml:",omitempty"`
+ PornInfo *TextRecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *TextRecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *TextRecognitionInfo `xml:",omitempty"`
+ AdsInfo *TextRecognitionInfo `xml:",omitempty"`
+ IllegalInfo *TextRecognitionInfo `xml:",omitempty"`
+ AbuseInfo *TextRecognitionInfo `xml:",omitempty"`
+ Section []TextSectionResult `xml:",omitempty"`
+ UserInfo *UserExtraInfo `xml:",omitempty"`
+}
+
+// TextLibResult is the hit-library result of TextRecognitionInfo
+type TextLibResult struct {
+ LibType int32 `xml:"LibType,omitempty"`
+ LibName string `xml:"LibName,omitempty"`
+ Keywords []string `xml:"Keywords,omitempty"`
+}
+
+// TextRecognitionInfo is the per-scene result of text auditing
+type TextRecognitionInfo struct {
+ Code int `xml:",omitempty"`
+ HitFlag int `xml:",omitempty"`
+ Score int `xml:",omitempty"`
+ Count int `xml:",omitempty"`
+ Keywords string `xml:",omitempty"`
+ LibResults []TextLibResult `xml:",omitempty"`
+}
+
+// TextSectionResult is the section result of TextAuditingJobDetail
+type TextSectionResult struct {
+ StartByte int `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Result int `xml:",omitempty"`
+ PornInfo *TextRecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *TextRecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *TextRecognitionInfo `xml:",omitempty"`
+ AdsInfo *TextRecognitionInfo `xml:",omitempty"`
+ IllegalInfo *TextRecognitionInfo `xml:",omitempty"`
+ AbuseInfo *TextRecognitionInfo `xml:",omitempty"`
+}
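+
+// Usage sketch (editorial example, not part of the vendored SDK): text can be
+// audited either from an object in the bucket or from raw content; per the
+// linked API docs, InputContent is expected to be base64-encoded.
+//
+//  content := base64.StdEncoding.EncodeToString([]byte("text to check"))
+//  opt := &cos.PutTextAuditingJobOptions{
+//   InputContent: content,
+//   Conf:         &cos.TextAuditingJobConf{DetectType: "porn,ads,illegal,abuse"},
+//  }
+//  res, _, err := client.CI.PutTextAuditingJob(context.Background(), opt)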
+// GetTextAuditingJob queries a text auditing job. https://cloud.tencent.com/document/product/436/56288
+func (s *CIService) GetTextAuditingJob(ctx context.Context, jobid string) (*GetTextAuditingJobResult, *Response, error) {
+ var res GetTextAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/text/auditing/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// PutDocumentAuditingJobOptions is the option of PutDocumentAuditingJob
+type PutDocumentAuditingJobOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ InputObject string `xml:"Input>Object,omitempty"`
+ InputUrl string `xml:"Input>Url,omitempty"`
+ InputType string `xml:"Input>Type,omitempty"`
+ InputDataId string `xml:"Input>DataId,omitempty"`
+ InputUserInfo *UserExtraInfo `xml:"Input>UserInfo,omitempty"`
+ Conf *DocumentAuditingJobConf `xml:"Conf"`
+}
+
+// DocumentAuditingJobConf is the config of PutDocumentAuditingJobOptions
+type DocumentAuditingJobConf struct {
+ DetectType string `xml:",omitempty"`
+ Callback string `xml:",omitempty"`
+ BizType string `xml:",omitempty"`
+}
+
+// PutDocumentAuditingJobResult is the result of PutDocumentAuditingJob
+type PutDocumentAuditingJobResult PutVideoAuditingJobResult
+
+// PutDocumentAuditingJob creates a document auditing job. https://cloud.tencent.com/document/product/436/59381
+func (s *CIService) PutDocumentAuditingJob(ctx context.Context, opt *PutDocumentAuditingJobOptions) (*PutDocumentAuditingJobResult, *Response, error) {
+ var res PutDocumentAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/document/auditing",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// GetDocumentAuditingJobResult is the result of GetDocumentAuditingJob
+type GetDocumentAuditingJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *DocumentAuditingJobDetail `xml:",omitempty"`
+ RequestId string `xml:",omitempty"`
+}
+
+// DocumentAuditingJobDetail is the detail of GetDocumentAuditingJobResult
+type DocumentAuditingJobDetail struct {
+ Code string `xml:",omitempty"`
+ Message string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ State string `xml:",omitempty"`
+ CreationTime string `xml:",omitempty"`
+ Object string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ DataId string `xml:",omitempty"`
+ PageCount int `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Suggestion int `xml:",omitempty"`
+ Labels *DocumentResultInfo `xml:",omitempty"`
+ PageSegment *DocumentPageSegmentInfo `xml:",omitempty"`
+ UserInfo *UserExtraInfo `xml:",omitempty"`
+}
+
+// DocumentResultInfo is the aggregated scene result of document auditing
+type DocumentResultInfo struct {
+ PornInfo *RecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *RecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *RecognitionInfo `xml:",omitempty"`
+ AdsInfo *RecognitionInfo `xml:",omitempty"`
+}
+
+// DocumentPageSegmentInfo holds the per-page results of document auditing
+type DocumentPageSegmentInfo struct {
+ Results []DocumentPageSegmentResultResult `xml:",omitempty"`
+}
+
+// DocumentPageSegmentResultResult is the result of a single page or sheet
+type DocumentPageSegmentResultResult struct {
+ Url string `xml:",omitempty"`
+ Text string `xml:",omitempty"`
+ PageNumber int `xml:",omitempty"`
+ SheetNumber int `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Suggestion int `xml:",omitempty"`
+ PornInfo *RecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *RecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *RecognitionInfo `xml:",omitempty"`
+ AdsInfo *RecognitionInfo `xml:",omitempty"`
+}
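+
+// Usage sketch (editorial example, not part of the vendored SDK): submit a
+// document auditing job for an object in the bucket; names are placeholders.
+//
+//  opt := &cos.PutDocumentAuditingJobOptions{
+//   InputObject: "contract.pdf",
+//   Conf:        &cos.DocumentAuditingJobConf{DetectType: "porn,ads"},
+//  }
+//  res, _, err := client.CI.PutDocumentAuditingJob(context.Background(), opt)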
+// OcrResult is the OCR text result within RecognitionInfo
+type OcrResult struct {
+ Text string `xml:"Text,omitempty"`
+ Keywords []string `xml:"Keywords,omitempty"`
+ Location *Location `xml:"Location,omitempty"`
+}
+
+// ObjectResult is a detected object within RecognitionInfo
+type ObjectResult struct {
+ Name string `xml:"Name,omitempty"`
+ Location *Location `xml:"Location,omitempty"`
+}
+
+// LibResult is a custom-library hit within RecognitionInfo
+type LibResult struct {
+ ImageId string `xml:"ImageId,omitempty"`
+ Score uint32 `xml:"Score,omitempty"`
+ TextLibResult
+}
+
+// Location is the location of a detection box
+type Location struct {
+ X float64 `xml:"X,omitempty"` // X coordinate of the top-left corner
+ Y float64 `xml:"Y,omitempty"` // Y coordinate of the top-left corner
+ Width float64 `xml:"Width,omitempty"` // width of the box
+ Height float64 `xml:"Height,omitempty"` // height of the box
+ Rotate float64 `xml:"Rotate,omitempty"` // rotation angle of the detection box
+}
+
+// GetDocumentAuditingJob queries a document auditing job. https://cloud.tencent.com/document/product/436/59382
+func (s *CIService) GetDocumentAuditingJob(ctx context.Context, jobid string) (*GetDocumentAuditingJobResult, *Response, error) {
+ var res GetDocumentAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/document/auditing/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// PutWebpageAuditingJobOptions is the option of PutWebpageAuditingJob
+type PutWebpageAuditingJobOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ InputUrl string `xml:"Input>Url,omitempty"`
+ InputDataId string `xml:"Input>DataId,omitempty"`
+ InputUserInfo *UserExtraInfo `xml:"Input>UserInfo,omitempty"`
+ Conf *WebpageAuditingJobConf `xml:"Conf"`
+}
+
+// WebpageAuditingJobConf is the config of PutWebpageAuditingJobOptions
+type WebpageAuditingJobConf struct {
+ DetectType string `xml:",omitempty"`
+ Callback string `xml:",omitempty"`
+ ReturnHighlightHtml bool `xml:",omitempty"`
+}
+
+// PutWebpageAuditingJobResult is the result of PutWebpageAuditingJob
+type PutWebpageAuditingJobResult PutVideoAuditingJobResult
+
+// PutWebpageAuditingJob creates a webpage auditing job. https://cloud.tencent.com/document/product/436/63958
+func (s *CIService) PutWebpageAuditingJob(ctx context.Context, opt *PutWebpageAuditingJobOptions) (*PutWebpageAuditingJobResult, *Response, error) {
+ var res PutWebpageAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/webpage/auditing",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// GetWebpageAuditingJobResult is the result of GetWebpageAuditingJob
+type GetWebpageAuditingJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *WebpageAuditingJobDetail `xml:",omitempty"`
+}
+
+// WebpageAuditingJobDetail is the detail of GetWebpageAuditingJobResult
+type WebpageAuditingJobDetail struct {
+ Code string `xml:",omitempty"`
+ Message string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ State string `xml:",omitempty"`
+ CreationTime string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ Labels *WebpageResultInfo `xml:",omitempty"`
+ PageCount int `xml:",omitempty"`
+ Suggestion int `xml:",omitempty"`
+ ImageResults *WebpageImageResults `xml:",omitempty"`
+ TextResults *WebpageTextResults `xml:",omitempty"`
+ HighlightHtml string `xml:",omitempty"`
+ DataId string `xml:",omitempty"`
+ UserInfo *UserExtraInfo `xml:",omitempty"`
+}
+
+// WebpageResultInfo is the aggregated scene result of webpage auditing
+type WebpageResultInfo struct {
+ PornInfo *RecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *RecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *RecognitionInfo `xml:",omitempty"`
+ AdsInfo *RecognitionInfo `xml:",omitempty"`
+}
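+
+// Usage sketch (editorial example, not part of the vendored SDK): webpage
+// auditing takes a URL rather than a bucket object; the URL is a placeholder.
+//
+//  opt := &cos.PutWebpageAuditingJobOptions{
+//   InputUrl: "https://www.example.com",
+//   Conf:     &cos.WebpageAuditingJobConf{DetectType: "porn,ads", ReturnHighlightHtml: true},
+//  }
+//  res, _, err := client.CI.PutWebpageAuditingJob(context.Background(), opt)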
+// WebpageImageResults holds the image results of webpage auditing
+type WebpageImageResults struct {
+ Results []WebpageImageResult `xml:",omitempty"`
+}
+
+// WebpageImageResult is the result of a single image on the page
+type WebpageImageResult struct {
+ Url string `xml:",omitempty"`
+ Text string `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ PageNumber int `xml:",omitempty"`
+ SheetNumber int `xml:",omitempty"`
+ Suggestion int `xml:",omitempty"`
+ PornInfo *RecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *RecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *RecognitionInfo `xml:",omitempty"`
+ AdsInfo *RecognitionInfo `xml:",omitempty"`
+}
+
+// WebpageTextResults holds the text results of webpage auditing
+type WebpageTextResults struct {
+ Results []WebpageTextResult `xml:",omitempty"`
+}
+
+// WebpageTextResult is the result of a single text segment on the page
+type WebpageTextResult struct {
+ Text string `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ Result int `xml:",omitempty"`
+ PageNumber int `xml:",omitempty"`
+ SheetNumber int `xml:",omitempty"`
+ Suggestion int `xml:",omitempty"`
+ PornInfo *TextRecognitionInfo `xml:",omitempty"`
+ TerrorismInfo *TextRecognitionInfo `xml:",omitempty"`
+ PoliticsInfo *TextRecognitionInfo `xml:",omitempty"`
+ AdsInfo *TextRecognitionInfo `xml:",omitempty"`
+ IllegalInfo *TextRecognitionInfo `xml:",omitempty"`
+ AbuseInfo *TextRecognitionInfo `xml:",omitempty"`
+}
+
+// GetWebpageAuditingJob queries a webpage auditing job. https://cloud.tencent.com/document/product/436/63959
+func (s *CIService) GetWebpageAuditingJob(ctx context.Context, jobid string) (*GetWebpageAuditingJobResult, *Response, error) {
+ var res GetWebpageAuditingJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/webpage/auditing/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// ReportBadcaseOptions is the option of ReportBadcase
+type ReportBadcaseOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ ContentType int `xml:",omitempty"`
+ Text string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ Label string `xml:",omitempty"`
+ SuggestedLabel string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ ModerationTime string `xml:",omitempty"`
+}
+
+// ReportBadcaseResult is the result of ReportBadcase
+type ReportBadcaseResult struct {
+ XMLName xml.Name `xml:"Response"`
+ RequestId string `xml:",omitempty"`
+}
+
+// ReportBadcase reports a moderation badcase.
+func (s *CIService) ReportBadcase(ctx context.Context, opt *ReportBadcaseOptions) (*ReportBadcaseResult, *Response, error) {
+ var res ReportBadcaseResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/report/badcase",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// PutVirusDetectJobOptions is the option of PutVirusDetectJob
+type PutVirusDetectJobOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ InputObject string `xml:"Input>Object,omitempty"`
+ InputUrl string `xml:"Input>Url,omitempty"`
+ Conf *VirusDetectJobConf `xml:"Conf"`
+}
+
+// VirusDetectJobConf is the config of PutVirusDetectJobOptions
+type VirusDetectJobConf struct {
+ DetectType string `xml:",omitempty"`
+ Callback string `xml:",omitempty"`
+}
+
+// PutVirusDetectJobResult is the result of PutVirusDetectJob
+type PutVirusDetectJobResult PutVideoAuditingJobResult
+
+// PutVirusDetectJob submits a virus detection job. https://cloud.tencent.com/document/product/436/63961
+func (s *CIService) PutVirusDetectJob(ctx context.Context, opt *PutVirusDetectJobOptions) (*PutVirusDetectJobResult, *Response, error) {
+ var res PutVirusDetectJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/virus/detect",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
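+
+// Usage sketch (editorial example, not part of the vendored SDK): submit a
+// virus detection job and later fetch its result by JobId; the object key and
+// DetectType value are placeholders taken from the linked docs.
+//
+//  opt := &cos.PutVirusDetectJobOptions{
+//   InputObject: "setup.exe",
+//   Conf:        &cos.VirusDetectJobConf{DetectType: "virus"},
+//  }
+//  create, _, err := client.CI.PutVirusDetectJob(context.Background(), opt)
+//  if err == nil {
+//   res, _, _ := client.CI.GetVirusDetectJob(context.Background(), create.JobsDetail.JobId)
+//   _ = res // res.JobsDetail.Suggestion reports the verdict
+//  }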
+
+// GetVirusDetectJobResult is the result of GetVirusDetectJob
+type GetVirusDetectJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *VirusDetectJobDetail `xml:",omitempty"`
+ RequestId string `xml:",omitempty"`
+}
+
+// VirusDetectJobDetail is the detail of GetVirusDetectJobResult
+type VirusDetectJobDetail struct {
+ Code string `xml:",omitempty"`
+ Message string `xml:",omitempty"`
+ JobId string `xml:",omitempty"`
+ State string `xml:",omitempty"`
+ CreationTime string `xml:",omitempty"`
+ Object string `xml:",omitempty"`
+ Url string `xml:",omitempty"`
+ Suggestion string `xml:",omitempty"`
+ DetectDetail *VirusResults `xml:",omitempty"`
+}
+
+// VirusResults holds the detected viruses
+type VirusResults struct {
+ Result []VirusInfo `xml:",omitempty"`
+}
+
+// VirusInfo describes a single detected virus
+type VirusInfo struct {
+ FileName string `xml:",omitempty"`
+ VirusName string `xml:",omitempty"`
+}
+
+// GetVirusDetectJob queries the result of a virus detection job. https://cloud.tencent.com/document/product/436/63962
+func (s *CIService) GetVirusDetectJob(ctx context.Context, jobid string) (*GetVirusDetectJobResult, *Response, error) {
+ var res GetVirusDetectJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/virus/detect/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+// Persistent image processing on upload https://cloud.tencent.com/document/product/460/18147
+// Blind watermark, added on upload https://cloud.tencent.com/document/product/460/19017
+// QR code recognition on upload https://cloud.tencent.com/document/product/460/37513
+func (s *CIService) Put(ctx context.Context, name string, r io.Reader, uopt *ObjectPutOptions) (*ImageProcessResult, *Response, error) {
+ if r == nil {
+  return nil, nil, fmt.Errorf("reader is nil")
+ }
+ if err := CheckReaderLen(r); err != nil {
+  return nil, nil, err
+ }
+ opt := CloneObjectPutOptions(uopt)
+ totalBytes, err := GetReaderLen(r)
+ if err != nil && opt != nil && opt.Listener != nil {
+  if opt.ContentLength == 0 {
+   return nil, nil, err
+  }
+  totalBytes = opt.ContentLength
+ }
+ if err == nil {
+  // Consistent with Go's net/http: for readers other than bytes.Buffer/bytes.Reader/strings.Reader,
+  // the caller specifies ContentLength, or chunked upload is used.
+  // if opt != nil && opt.ContentLength == 0 && IsLenReader(r) {
+  //  opt.ContentLength = totalBytes
+  // }
+  // lilang : 2022-07-04
+  // If Content-Length is not set, the image CGI cannot read the body. The image
+  // processing CGI does not support chunked transfer yet; this will be fixed later.
+  if opt != nil && opt.ContentLength == 0 {
+   opt.ContentLength = totalBytes
+  }
+
+ }
+ reader := TeeReader(r, nil, totalBytes, nil)
+ if s.client.Conf.EnableCRC {
+  reader.writer = crc64.New(crc64.MakeTable(crc64.ECMA))
+ }
+ if opt != nil && opt.Listener != nil {
+  reader.listener = opt.Listener
+ }
+
+ var res ImageProcessResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(name),
+  method: http.MethodPut,
+  body: reader,
+  optHeader: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+
+ return &res, resp, err
+}
+
+// PutFromFile puts an object from a local file, with CI processing on upload.
+func (s *CIService) PutFromFile(ctx context.Context, name string, filePath string, opt *ObjectPutOptions) (*ImageProcessResult, *Response, error) {
+ fd, err := os.Open(filePath)
+ if err != nil {
+  return nil, nil, err
+ }
+ defer fd.Close()
+
+ return s.Put(ctx, name, fd, opt)
+}
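+
+// Usage sketch (editorial example, not part of the vendored SDK): upload with
+// persistent image processing by attaching a Pic-Operations header, assuming
+// the PicOperations/EncodePicOperations helpers defined elsewhere in this
+// file; the rule string and keys below are placeholders.
+//
+//  pic := &cos.PicOperations{
+//   IsPicInfo: 1,
+//   Rules: []cos.PicOperationsRules{{FileId: "small.jpg", Rule: "imageMogr2/thumbnail/!50p"}},
+//  }
+//  opt := &cos.ObjectPutOptions{
+//   ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{XOptionHeader: &http.Header{}},
+//  }
+//  opt.XOptionHeader.Add("Pic-Operations", cos.EncodePicOperations(pic))
+//  res, _, err := client.CI.PutFromFile(context.Background(), "big.jpg", "./big.jpg", opt)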
+
+// Basic image processing https://cloud.tencent.com/document/product/460/36540
+// Blind watermark, added on download https://cloud.tencent.com/document/product/460/19017
+func (s *CIService) Get(ctx context.Context, name string, operation string, opt *ObjectGetOptions, id ...string) (*Response, error) {
+ var u string
+ if len(id) == 1 {
+  u = fmt.Sprintf("/%s?versionId=%s&%s", encodeURIComponent(name), id[0], encodeURIComponent(operation))
+ } else if len(id) == 0 {
+  u = fmt.Sprintf("/%s?%s", encodeURIComponent(name), encodeURIComponent(operation))
+ } else {
+  return nil, errors.New("wrong params")
+ }
+
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: u,
+  method: http.MethodGet,
+  optQuery: opt,
+  optHeader: opt,
+  disableCloseBody: true,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+
+ if opt != nil && opt.Listener != nil {
+  if err == nil && resp != nil {
+   if totalBytes, e := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64); e == nil {
+    resp.Body = TeeReader(resp.Body, nil, totalBytes, opt.Listener)
+   }
+  }
+ }
+ return resp, err
+}
+
+// GetToFile downloads the processed object to a local file.
+func (s *CIService) GetToFile(ctx context.Context, name, localpath, operation string, opt *ObjectGetOptions, id ...string) (*Response, error) {
+ resp, err := s.Get(ctx, name, operation, opt, id...)
+ if err != nil {
+  return resp, err
+ }
+ defer resp.Body.Close()
+
+ // If the file exists, overwrite it
+ fd, err := os.OpenFile(localpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660)
+ if err != nil {
+  return resp, err
+ }
+
+ _, err = io.Copy(fd, resp.Body)
+ fd.Close()
+ if err != nil {
+  return resp, err
+ }
+
+ return resp, nil
+}
+
+type GetQRcodeResult struct {
+ XMLName xml.Name `xml:"Response"`
+ CodeStatus int `xml:"CodeStatus,omitempty"`
+ QRcodeInfo *QRcodeInfo `xml:"QRcodeInfo,omitempty"`
+ ResultImage string `xml:"ResultImage,omitempty"`
+}
+
+// GetQRcode recognizes QR codes on download. https://cloud.tencent.com/document/product/436/54070
+func (s *CIService) GetQRcode(ctx context.Context, name string, cover int, opt *ObjectGetOptions, id ...string) (*GetQRcodeResult, *Response, error) {
+ var u string
+ if len(id) == 1 {
+  u = fmt.Sprintf("/%s?versionId=%s&ci-process=QRcode&cover=%v", encodeURIComponent(name), id[0], cover)
+ } else if len(id) == 0 {
+  u = fmt.Sprintf("/%s?ci-process=QRcode&cover=%v", encodeURIComponent(name), cover)
+ } else {
+  return nil, nil, errors.New("wrong params")
+ }
+
+ var res GetQRcodeResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: u,
+  method: http.MethodGet,
+  optQuery: opt,
+  optHeader: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+type GenerateQRcodeOptions struct {
+ QRcodeContent string `url:"qrcode-content,omitempty"`
+ Mode int `url:"mode,omitempty"`
+ Width int `url:"width,omitempty"`
+}
+type GenerateQRcodeResult struct {
+ XMLName xml.Name `xml:"Response"`
+ ResultImage string `xml:"ResultImage,omitempty"`
+}
+
+// GenerateQRcode generates a QR code image. https://cloud.tencent.com/document/product/436/54071
+func (s *CIService) GenerateQRcode(ctx context.Context, opt *GenerateQRcodeOptions) (*GenerateQRcodeResult, *Response, error) {
+ var res GenerateQRcodeResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/?ci-process=qrcode-generate",
+  method: http.MethodGet,
+  optQuery: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// GenerateQRcodeToFile generates a QR code image and writes it to a local file.
+func (s *CIService) GenerateQRcodeToFile(ctx context.Context, filePath string, opt *GenerateQRcodeOptions) (*GenerateQRcodeResult, *Response, error) {
+ res, resp, err := s.GenerateQRcode(ctx, opt)
+ if err != nil {
+  return res, resp, err
+ }
+
+ // If the file exists, overwrite it
+ fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660)
+ if err != nil {
+  return res, resp, err
+ }
+ defer fd.Close()
+
+ bs, err := base64.StdEncoding.DecodeString(res.ResultImage)
+ if err != nil {
+  return res, resp, err
+ }
+ fb := bytes.NewReader(bs)
+ _, err = io.Copy(fd, fb)
+
+ return res, resp, err
+}
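+
+// Usage sketch (editorial example, not part of the vendored SDK): generate a
+// QR code image and write it to a local file; values are placeholders.
+//
+//  opt := &cos.GenerateQRcodeOptions{
+//   QRcodeContent: "https://www.example.com",
+//   Mode:          0,
+//   Width:         200,
+//  }
+//  res, _, err := client.CI.GenerateQRcodeToFile(context.Background(), "qr.png", opt)
+//  // res.ResultImage holds the same image as a base64 string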
+
+// PutGuetzli enables Guetzli compression. https://cloud.tencent.com/document/product/460/30112
+func (s *CIService) PutGuetzli(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/?guetzli",
+  method: http.MethodPut,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+type GetGuetzliResult struct {
+ XMLName xml.Name `xml:"GuetzliStatus"`
+ GuetzliStatus string `xml:",chardata"`
+}
+
+// GetGuetzli queries the Guetzli status. https://cloud.tencent.com/document/product/460/30111
+func (s *CIService) GetGuetzli(ctx context.Context) (*GetGuetzliResult, *Response, error) {
+ var res GetGuetzliResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/?guetzli",
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// DeleteGuetzli disables Guetzli compression. https://cloud.tencent.com/document/product/460/30113
+func (s *CIService) DeleteGuetzli(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/?guetzli",
+  method: http.MethodDelete,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+type AddStyleOptions struct {
+ XMLName xml.Name `xml:"AddStyle"`
+ StyleName string `xml:"StyleName,omitempty"`
+ StyleBody string `xml:"StyleBody,omitempty"`
+}
+
+type GetStyleOptions struct {
+ XMLName xml.Name `xml:"GetStyle"`
+ StyleName string `xml:"StyleName,omitempty"`
+}
+
+type GetStyleResult struct {
+ XMLName xml.Name `xml:"StyleList"`
+ StyleRule []StyleRule `xml:"StyleRule,omitempty"`
+}
+
+type StyleRule struct {
+ StyleName string `xml:"StyleName,omitempty"`
+ StyleBody string `xml:"StyleBody,omitempty"`
+}
+
+type DeleteStyleOptions struct {
+ XMLName xml.Name `xml:"DeleteStyle"`
+ StyleName string `xml:"StyleName,omitempty"`
+}
+
+// AddStyle adds an image style to the bucket.
+func (s *CIService) AddStyle(ctx context.Context, opt *AddStyleOptions) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodPut,
+  uri: "/?style",
+  body: opt,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+// GetStyle queries image styles of the bucket.
+func (s *CIService) GetStyle(ctx context.Context, opt *GetStyleOptions) (*GetStyleResult, *Response, error) {
+ var res GetStyleResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodGet,
+  uri: "/?style",
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// DeleteStyle deletes an image style from the bucket.
+func (s *CIService) DeleteStyle(ctx context.Context, opt *DeleteStyleOptions) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodDelete,
+  uri: "/?style",
+  body: opt,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+type ImageQualityResult struct {
+ XMLName xml.Name `xml:"Response"`
+ LongImage bool `xml:"LongImage,omitempty"`
+ BlackAndWhite bool `xml:"BlackAndWhite,omitempty"`
+ SmallImage bool `xml:"SmallImage,omitempty"`
+ BigImage bool `xml:"BigImage,omitempty"`
+ PureImage bool `xml:"PureImage,omitempty"`
+ ClarityScore int `xml:"ClarityScore,omitempty"`
+ AestheticScore int `xml:"AestheticScore,omitempty"`
+ RequestId string `xml:"RequestId,omitempty"`
+}
+
+// ImageQuality assesses the quality of an image.
+func (s *CIService) ImageQuality(ctx context.Context, obj string) (*ImageQualityResult, *Response, error) {
+ var res ImageQualityResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=AssessQuality",
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type OcrRecognitionOptions struct {
+ Type string `url:"type,omitempty"`
+ LanguageType string `url:"language-type,omitempty"`
+ Ispdf bool `url:"ispdf,omitempty"`
+ PdfPageNumber int `url:"pdf-pagenumber,omitempty"`
+ Isword bool `url:"isword,omitempty"`
+ EnableWordPolygon bool `url:"enable-word-polygon,omitempty"`
+}
+
+type OcrRecognitionResult struct {
+ XMLName xml.Name `xml:"Response"`
+ TextDetections []TextDetections `xml:"TextDetections,omitempty"`
+ Language string `xml:"Language,omitempty"`
+ Angel float64 `xml:"Angel,omitempty"`
+ PdfPageSize int `xml:"PdfPageSize,omitempty"`
+ RequestId string `xml:"RequestId,omitempty"`
+}
+
+type TextDetections struct {
+ DetectedText string `xml:"DetectedText,omitempty"`
+ Confidence int `xml:"Confidence,omitempty"`
+ Polygon []Polygon `xml:"Polygon,omitempty"`
+ ItemPolygon []ItemPolygon `xml:"ItemPolygon,omitempty"`
+ Words []Words `xml:"Words,omitempty"`
+ WordPolygon []WordPolygon `xml:"WordPolygon,omitempty"`
+}
+
+type Polygon struct {
+ X int `xml:"X,omitempty"`
+ Y int `xml:"Y,omitempty"`
+}
+
+// ItemPolygon is the bounding box of a detected item
+type ItemPolygon struct {
+ X int `xml:"X,omitempty"`
+ Y int `xml:"Y,omitempty"`
+ Width int `xml:"Width,omitempty"`
+ Height int `xml:"Height,omitempty"`
+}
+
+type Words struct {
+ Confidence int `xml:"Confidence,omitempty"`
+ Character string `xml:"Character,omitempty"`
+ WordCoordPoint *WordCoordPoint `xml:"WordCoordPoint,omitempty"`
+}
+
+type WordCoordPoint struct {
+ WordCoordinate []Polygon `xml:"WordCoordinate,omitempty"`
+}
+
+type WordPolygon struct {
+ LeftTop *Polygon `xml:"LeftTop,omitempty"`
+ RightTop *Polygon `xml:"RightTop,omitempty"`
+ RightBottom *Polygon `xml:"RightBottom,omitempty"`
+ LeftBottom *Polygon `xml:"LeftBottom,omitempty"`
+}
+
+// OcrRecognition performs general OCR text recognition.
+func (s *CIService) OcrRecognition(ctx context.Context, obj string, opt *OcrRecognitionOptions) (*OcrRecognitionResult, *Response, error) {
+ var res OcrRecognitionResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=OCR",
+  method: http.MethodGet,
+  optQuery: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type DetectCarResult struct {
+ XMLName xml.Name `xml:"Response"`
+ RequestId string `xml:"RequestId,omitempty"`
+ CarTags []CarTags `xml:"CarTags,omitempty"`
+}
+
+type CarTags struct {
+ Serial string `xml:"Serial,omitempty"`
+ Brand string `xml:"Brand,omitempty"`
+ Type string `xml:"Type,omitempty"`
+ Color string `xml:"Color,omitempty"`
+ Confidence int `xml:"Confidence,omitempty"`
+ Year int `xml:"Year,omitempty"`
+ CarLocation []CarLocation `xml:"CarLocation,omitempty"`
+ PlateContent []PlateContent `xml:"PlateContent,omitempty"`
+}
+
+type CarLocation struct {
+ X int `xml:"X,omitempty"`
+ Y int `xml:"Y,omitempty"`
+}
+
+type PlateContent struct {
+ Plate string `xml:"Plate,omitempty"`
+ Color string `xml:"Color,omitempty"`
+ Type string `xml:"Type,omitempty"`
+ PlateLocation *PlateLocation `xml:"PlateLocation,omitempty"`
+}
+
+type PlateLocation struct {
+ X int `xml:"X,omitempty"`
+ Y int `xml:"Y,omitempty"`
+}
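+
+// Usage sketch (editorial example, not part of the vendored SDK): run general
+// OCR against an object and walk the detected text blocks; the key and type
+// value are placeholders.
+//
+//  res, _, err := client.CI.OcrRecognition(context.Background(), "scan.png", &cos.OcrRecognitionOptions{Type: "general"})
+//  if err == nil {
+//   for _, t := range res.TextDetections {
+//    fmt.Println(t.DetectedText, t.Confidence)
+//   }
+//  }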
+
+// DetectCar detects vehicles and license plates in an image.
+func (s *CIService) DetectCar(ctx context.Context, obj string) (*DetectCarResult, *Response, error) {
+ var res DetectCarResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=DetectCar",
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type CIServiceResult struct {
+ XMLName xml.Name `xml:"CIStatus"`
+ CIStatus string `xml:",chardata"`
+}
+
+// OpenCIService binds CI to the bucket.
+func (s *CIService) OpenCIService(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodPut,
+  uri: "/",
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+// GetCIService queries the CI binding status of the bucket.
+func (s *CIService) GetCIService(ctx context.Context) (*CIServiceResult, *Response, error) {
+ var res CIServiceResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodGet,
+  uri: "/",
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// CloseCIService unbinds CI from the bucket.
+func (s *CIService) CloseCIService(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodPut,
+  uri: "/?unbind",
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+type HotLinkOptions struct {
+ XMLName xml.Name `xml:"Hotlink"`
+ Url []string `xml:"Url,omitempty"`
+ Type string `xml:"Type,omitempty"`
+}
+
+type HotLinkResult struct {
+ XMLName xml.Name `xml:"Hotlink"`
+ Status string `xml:"Status,omitempty"`
+ Type string `xml:"Type,omitempty"`
+ Url []string `xml:"Url,omitempty"`
+}
+
+// SetHotLink sets the hotlink protection config.
+func (s *CIService) SetHotLink(ctx context.Context, opt *HotLinkOptions) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodPut,
+  uri: "/?hotlink",
+  body: opt,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+// GetHotLink queries the hotlink protection config.
+func (s *CIService) GetHotLink(ctx context.Context) (*HotLinkResult, *Response, error) {
+ var res HotLinkResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodGet,
+  uri: "/?hotlink",
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type OriginProtectResult struct {
+ XMLName xml.Name `xml:"OriginProtectStatus"`
+ OriginProtectStatus string `xml:",chardata"`
+}
+
+// OpenOriginProtect enables origin protection.
+func (s *CIService) OpenOriginProtect(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodPut,
+  uri: "/?origin-protect",
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+// GetOriginProtect queries the origin protection status.
+func (s *CIService) GetOriginProtect(ctx context.Context) (*OriginProtectResult, *Response, error) {
+ var res OriginProtectResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodGet,
+  uri: "/?origin-protect",
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// CloseOriginProtect disables origin protection.
+func (s *CIService) CloseOriginProtect(ctx context.Context) (*Response, error) {
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodDelete,
+  uri: "/?origin-protect",
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return resp, err
+}
+
+type PicTagResult struct {
+ XMLName xml.Name `xml:"RecognitionResult"`
+ Labels []PicTag `xml:"Labels,omitempty"`
+}
+
+type PicTag struct {
+ Confidence int `xml:"Confidence,omitempty"`
+ Name string `xml:"Name,omitempty"`
+}
+
+// PicTag recognizes labels in an image (ci-process=detect-label).
+func (s *CIService) PicTag(ctx context.Context, obj string) (*PicTagResult, *Response, error) {
+ var res PicTagResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  method: http.MethodGet,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=detect-label",
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type DetectFaceOptions struct {
+ MaxFaceNum int `url:"max-face-num,omitempty"`
+}
+
+type DetectFaceResult struct {
+ XMLName xml.Name `xml:"Response"`
+ ImageWidth int `xml:"ImageWidth,omitempty"`
+ ImageHeight int `xml:"ImageHeight,omitempty"`
+ FaceModelVersion string `xml:"FaceModelVersion,omitempty"`
+ RequestId string `xml:"RequestId,omitempty"`
+ FaceInfos []FaceInfos `xml:"FaceInfos,omitempty"`
+}
+
+type FaceInfos struct {
+ X int `xml:"X,omitempty"`
+ Y int `xml:"Y,omitempty"`
+ Width int `xml:"Width,omitempty"`
+ Height int `xml:"Height,omitempty"`
+}
+
+// DetectFace detects faces in an image.
+func (s *CIService) DetectFace(ctx context.Context, obj string, opt *DetectFaceOptions) (*DetectFaceResult, *Response, error) {
+ var res DetectFaceResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  method: http.MethodGet,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=DetectFace",
+  optQuery: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type FaceEffectOptions struct {
+ Type string `url:"type,omitempty"`
+ Whitening int `url:"whitening,omitempty"`
+ Smoothing int `url:"smoothing,omitempty"`
+ FaceLifting int `url:"faceLifting,omitempty"`
+ EyeEnlarging int `url:"eyeEnlarging,omitempty"`
+ Gender int `url:"gender,omitempty"`
+ Age int `url:"age,omitempty"`
+}
+
+type FaceEffectResult struct {
+ XMLName xml.Name `xml:"Response"`
+ ResultImage string `xml:"ResultImage,omitempty"`
+ ResultMask string `xml:"ResultMask,omitempty"`
+}
+
+// FaceEffect applies face effects such as beautification or transformation.
+func (s *CIService) FaceEffect(ctx context.Context, obj string, opt *FaceEffectOptions) (*FaceEffectResult, *Response, error) {
+ var res FaceEffectResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  method: http.MethodGet,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=face-effect",
+  optQuery: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+type IdCardOCROptions struct {
+ CardSide string `url:"CardSide,omitempty"`
+ Config *IdCardOCROptionsConfig `url:"Config,omitempty"`
+}
+
+type IdCardOCROptionsConfig struct {
+ CropIdCard bool `json:"CropIdCard,omitempty"`
+ CropPortrait bool `json:"CropPortrait,omitempty"`
+ CopyWarn bool `json:"CopyWarn,omitempty"`
+ BorderCheckWarn bool `json:"BorderCheckWarn,omitempty"`
+ ReshootWarn bool `json:"ReshootWarn,omitempty"`
+ DetectPsWarn bool `json:"DetectPsWarn,omitempty"`
+ TempIdWarn bool `json:"TempIdWarn,omitempty"`
+ InvalidDateWarn bool `json:"InvalidDateWarn,omitempty"`
+ Quality bool `json:"Quality,omitempty"`
+ MultiCardDetect bool `json:"MultiCardDetect,omitempty"`
+}
+
+// EncodeValues serializes the config as JSON into a single "Config" query parameter.
+func (c *IdCardOCROptionsConfig) EncodeValues(key string, v *url.Values) error {
+ config, err := json.Marshal(c)
+ if err != nil {
+  return err
+ }
+ v.Add("Config", string(config))
+
+ return nil
+}
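+
+// Usage sketch (editorial example, not part of the vendored SDK): the Config
+// struct above is JSON-encoded into one "Config" query parameter by
+// EncodeValues, so a request can be built like this (field values are
+// placeholders):
+//
+//  query := &cos.IdCardOCROptions{
+//   CardSide: "FRONT",
+//   Config:   &cos.IdCardOCROptionsConfig{CropIdCard: true, CropPortrait: true},
+//  }
+//  res, _, err := client.CI.IdCardOCRWhenCloud(context.Background(), "idcard.jpg", query)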
`xml:"ValidDate,omitempty"` +} + +type IdCardAdvancedInfo struct { + IdCard string `xml:"IdCard,omitempty"` + Portrait string `xml:"Portrait,omitempty"` + Quality string `xml:"Quality,omitempty"` + BorderCodeValue string `xml:"BorderCodeValue,omitempty"` + WarnInfos []string `xml:"WarnInfos,omitempty"` +} + +func (s *CIService) IdCardOCRWhenCloud(ctx context.Context, obj string, query *IdCardOCROptions) (*IdCardOCRResult, *Response, error) { + var res IdCardOCRResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + method: http.MethodGet, + uri: "/" + encodeURIComponent(obj) + "?ci-process=IDCardOCR", + optQuery: query, + result: &res, + } + resp, err := s.client.send(ctx, sendOpt) + return &res, resp, err +} + +func (s *CIService) IdCardOCRWhenUpload(ctx context.Context, obj, filePath string, query *IdCardOCROptions, header *ObjectPutOptions) (*IdCardOCRResult, *Response, error) { + fd, err := os.Open(filePath) + if err != nil { + return nil, nil, err + } + defer fd.Close() + + if err := CheckReaderLen(fd); err != nil { + return nil, nil, err + } + opt := CloneObjectPutOptions(header) + totalBytes, err := GetReaderLen(fd) + if err != nil && opt != nil && opt.Listener != nil { + if opt.ContentLength == 0 { + return nil, nil, err + } + totalBytes = opt.ContentLength + } + if err == nil { + // 与 go http 保持一致, 非bytes.Buffer/bytes.Reader/strings.Reader由用户指定ContentLength, 或使用 Chunk 上传 + // if opt != nil && opt.ContentLength == 0 && IsLenReader(r) { + // opt.ContentLength = totalBytes + // } + // lilang : 2022-07-04 + // 图片cgi不设置content-length的话,读不到body。图片处理cgi暂时不支持chunked,后面会修复。 + if opt != nil && opt.ContentLength == 0 { + opt.ContentLength = totalBytes + } + + } + reader := TeeReader(fd, nil, totalBytes, nil) + if s.client.Conf.EnableCRC { + reader.writer = crc64.New(crc64.MakeTable(crc64.ECMA)) + } + if opt != nil && opt.Listener != nil { + reader.listener = opt.Listener + } + + var res IdCardOCRResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(obj) + "?ci-process=IDCardOCR", + method: http.MethodPut, + optQuery: query, + body: reader, + optHeader: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + + return &res, resp, err +} + +type GetLiveCodeResult struct { + XMLName xml.Name `xml:"Response"` + LiveCode string `xml:"LiveCode,omitempty"` +} + +func (s *CIService) GetLiveCode(ctx context.Context) (*GetLiveCodeResult, *Response, error) { + var res GetLiveCodeResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + method: http.MethodGet, + uri: "/?ci-process=GetLiveCode", + result: &res, + } + resp, err := s.client.send(ctx, sendOpt) + return &res, resp, err +} + +type GetActionSequenceResult struct { + XMLName xml.Name `xml:"Response"` + ActionSequence string `xml:"ActionSequence,omitempty"` +} + +func (s *CIService) GetActionSequence(ctx context.Context) (*GetActionSequenceResult, *Response, error) { + var res GetActionSequenceResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + method: http.MethodGet, + uri: "/?ci-process=GetActionSequence", + result: &res, + } + resp, err := s.client.send(ctx, sendOpt) + return &res, resp, err +} + +type LivenessRecognitionOptions struct { + IdCard string `url:"IdCard,omitempty"` + Name string `url:"Name,omitempty"` + LivenessType string `url:"LivenessType,omitempty"` + ValidateData string `url:"ValidateData,omitempty"` + BestFrameNum int `url:"BestFrameNum,omitempty"` +} + +type LivenessRecognitionResult struct { + XMLName 
+
+type LivenessRecognitionResult struct {
+ XMLName xml.Name `xml:"Response"`
+ BestFrameBase64 string `xml:"BestFrameBase64,omitempty"`
+ Sim float64 `xml:"Sim,omitempty"`
+ BestFrameList []string `xml:"BestFrameList,omitempty"`
+}
+
+// LivenessRecognitionWhenCloud runs liveness recognition on a video already stored in COS.
+func (s *CIService) LivenessRecognitionWhenCloud(ctx context.Context, obj string, query *LivenessRecognitionOptions) (*LivenessRecognitionResult, *Response, error) {
+ var res LivenessRecognitionResult
+ sendOpt := &sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  method: http.MethodGet,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=LivenessRecognition",
+  optQuery: query,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, sendOpt)
+ return &res, resp, err
+}
+
+// LivenessRecognitionWhenUpload uploads a local video and runs liveness recognition in the same request.
+func (s *CIService) LivenessRecognitionWhenUpload(ctx context.Context, obj, filePath string, query *LivenessRecognitionOptions, header *ObjectPutOptions) (*LivenessRecognitionResult, *Response, error) {
+ fd, err := os.Open(filePath)
+ if err != nil {
+  return nil, nil, err
+ }
+ defer fd.Close()
+
+ if err := CheckReaderLen(fd); err != nil {
+  return nil, nil, err
+ }
+ opt := CloneObjectPutOptions(header)
+ totalBytes, err := GetReaderLen(fd)
+ if err != nil && opt != nil && opt.Listener != nil {
+  if opt.ContentLength == 0 {
+   return nil, nil, err
+  }
+  totalBytes = opt.ContentLength
+ }
+ if err == nil {
+  // Consistent with Go's net/http: for readers other than bytes.Buffer/bytes.Reader/strings.Reader,
+  // the caller specifies ContentLength, or chunked upload is used.
+  // if opt != nil && opt.ContentLength == 0 && IsLenReader(r) {
+  //  opt.ContentLength = totalBytes
+  // }
+  // lilang : 2022-07-04
+  // If Content-Length is not set, the image CGI cannot read the body. The image
+  // processing CGI does not support chunked transfer yet; this will be fixed later.
+  if opt != nil && opt.ContentLength == 0 {
+   opt.ContentLength = totalBytes
+  }
+
+ }
+ reader := TeeReader(fd, nil, totalBytes, nil)
+ if s.client.Conf.EnableCRC {
+  reader.writer = crc64.New(crc64.MakeTable(crc64.ECMA))
+ }
+ if opt != nil && opt.Listener != nil {
+  reader.listener = opt.Listener
+ }
+
+ var res LivenessRecognitionResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(obj) + "?ci-process=LivenessRecognition",
+  method: http.MethodPut,
+  optQuery: query,
+  body: reader,
+  optHeader: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+
+ return &res, resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_doc.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_doc.go
new file mode 100644
index 0000000..52d40f7
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_doc.go
@@ -0,0 +1,283 @@
+package cos
+
+import (
+ "context"
+ "encoding/xml"
+ "net/http"
+)
+
+type DocProcessJobInput struct {
+ Object string `xml:"Object,omitempty"`
+}
+
+type DocProcessJobOutput struct {
+ Region string `xml:"Region,omitempty"`
+ Bucket string `xml:"Bucket,omitempty"`
+ Object string `xml:"Object,omitempty"`
+}
+
+type DocProcessJobDocProcess struct {
+ SrcType string `xml:"SrcType,omitempty"`
+ TgtType string `xml:"TgtType,omitempty"`
+ SheetId int `xml:"SheetId,omitempty"`
+ StartPage int `xml:"StartPage,omitempty"`
+ EndPage int `xml:"EndPage,omitempty"`
+ ImageParams string `xml:"ImageParams,omitempty"`
+ DocPassword string `xml:"DocPassword,omitempty"`
+ Comments int `xml:"Comments,omitempty"`
+ PaperDirection int `xml:"PaperDirection,omitempty"`
+ Quality int `xml:"Quality,omitempty"`
+ Zoom int `xml:"Zoom,omitempty"`
+}
+
+type DocProcessJobDocProcessResult struct {
+ FailPageCount int `xml:",omitempty"`
+ SuccPageCount int `xml:"SuccPageCount,omitempty"`
+ TaskId string `xml:"TaskId,omitempty"`
+ TgtType string `xml:"TgtType,omitempty"`
+ TotalPageCount int `xml:"TotalPageCount,omitempty"`
+ TotalSheetCount int `xml:"TotalSheetCount,omitempty"`
+ PageInfo []struct {
+  PageNo int `xml:"PageNo,omitempty"`
+  TgtUri string `xml:"TgtUri,omitempty"`
+  XSheetPics int `xml:"X-SheetPics,omitempty"`
+  PicIndex int `xml:"PicIndex,omitempty"`
+  PicNum int `xml:"PicNum,omitempty"`
+ } `xml:"PageInfo,omitempty"`
+}
+
+type DocProcessJobOperation struct {
+ Output *DocProcessJobOutput `xml:"Output,omitempty"`
+ DocProcess *DocProcessJobDocProcess `xml:"DocProcess,omitempty"`
+ DocProcessResult *DocProcessJobDocProcessResult `xml:"DocProcessResult,omitempty"`
+}
+
+type DocProcessJobDetail struct {
+ Code string `xml:"Code,omitempty"`
+ Message string `xml:"Message,omitempty"`
+ JobId string `xml:"JobId,omitempty"`
+ Tag string `xml:"Tag,omitempty"`
+ State string `xml:"State,omitempty"`
+ CreationTime string `xml:"CreationTime,omitempty"`
+ QueueId string `xml:"QueueId,omitempty"`
+ Input *DocProcessJobInput `xml:"Input,omitempty"`
+ Operation *DocProcessJobOperation `xml:"Operation,omitempty"`
+}
+
+type CreateDocProcessJobsOptions struct {
+ XMLName xml.Name `xml:"Request"`
+ Tag string `xml:"Tag,omitempty"`
+ Input *DocProcessJobInput `xml:"Input,omitempty"`
+ Operation *DocProcessJobOperation `xml:"Operation,omitempty"`
+ QueueId string `xml:"QueueId,omitempty"`
+}
+
+type CreateDocProcessJobsResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail DocProcessJobDetail `xml:"JobsDetail,omitempty"`
+}
+
+// CreateDocProcessJobs creates a document preview job. https://cloud.tencent.com/document/product/436/54056
+func (s *CIService) CreateDocProcessJobs(ctx context.Context, opt *CreateDocProcessJobsOptions) (*CreateDocProcessJobsResult, *Response, error) {
+ var res CreateDocProcessJobsResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/doc_jobs",
+  method: http.MethodPost,
+  body: opt,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+type DescribeDocProcessJobResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail *DocProcessJobDetail `xml:"JobsDetail,omitempty"`
+ NonExistJobIds string `xml:"NonExistJobIds,omitempty"`
+}
+
+// DescribeDocProcessJob queries a document preview job. https://cloud.tencent.com/document/product/436/54095
+func (s *CIService) DescribeDocProcessJob(ctx context.Context, jobid string) (*DescribeDocProcessJobResult, *Response, error) {
+ var res DescribeDocProcessJobResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/doc_jobs/" + jobid,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+type DescribeDocProcessJobsOptions struct {
+ QueueId string `url:"queueId,omitempty"`
+ Tag string `url:"tag,omitempty"`
+ OrderByTime string `url:"orderByTime,omitempty"`
+ NextToken string `url:"nextToken,omitempty"`
+ Size int `url:"size,omitempty"`
+ States string `url:"states,omitempty"`
+ StartCreationTime string `url:"startCreationTime,omitempty"`
+ EndCreationTime string `url:"endCreationTime,omitempty"`
+}
+
+type DescribeDocProcessJobsResult struct {
+ XMLName xml.Name `xml:"Response"`
+ JobsDetail []DocProcessJobDetail `xml:"JobsDetail,omitempty"`
+ NextToken string `xml:"NextToken,omitempty"`
+}
+
+// DescribeDocProcessJobs lists document preview jobs matching the given conditions. https://cloud.tencent.com/document/product/436/54096
+func (s *CIService) DescribeDocProcessJobs(ctx context.Context, opt *DescribeDocProcessJobsOptions) (*DescribeDocProcessJobsResult, *Response, error) {
+ var res DescribeDocProcessJobsResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/doc_jobs",
+  optQuery: opt,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
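+
+// Usage sketch (editorial example, not part of the vendored SDK): convert a
+// document to images via a doc-process job; the bucket, region, keys, tag and
+// QueueId values are placeholders (a QueueId can be obtained from
+// DescribeDocProcessQueues below).
+//
+//  opt := &cos.CreateDocProcessJobsOptions{
+//   Tag:   "DocProcess",
+//   Input: &cos.DocProcessJobInput{Object: "input.docx"},
+//   Operation: &cos.DocProcessJobOperation{
+//    Output:     &cos.DocProcessJobOutput{Region: "ap-guangzhou", Bucket: "examplebucket-1250000000", Object: "output-${Number}.jpg"},
+//    DocProcess: &cos.DocProcessJobDocProcess{TgtType: "jpg"},
+//   },
+//   QueueId: "queue-id",
+//  }
+//  res, _, err := client.CI.CreateDocProcessJobs(context.Background(), opt)
+//  if err == nil {
+//   job, _, _ := client.CI.DescribeDocProcessJob(context.Background(), res.JobsDetail.JobId)
+//   _ = job
+//  }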
"/doc_jobs", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +type DescribeDocProcessQueuesOptions struct { + QueueIds string `url:"queueIds,omitempty"` + State string `url:"state,omitempty"` + PageNumber int `url:"pageNumber,omitempty"` + PageSize int `url:"pageSize,omitempty"` +} + +type DescribeDocProcessQueuesResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + TotalCount int `xml:"TotalCount,omitempty"` + PageNumber int `xml:"PageNumber,omitempty"` + PageSize int `xml:"PageSize,omitempty"` + QueueList []DocProcessQueue `xml:"QueueList,omitempty"` + NonExistPIDs []string `xml:"NonExistPIDs,omitempty"` +} + +type DocProcessQueue struct { + QueueId string `xml:"QueueId,omitempty"` + Name string `xml:"Name,omitempty"` + State string `xml:"State,omitempty"` + MaxSize int `xml:"MaxSize,omitempty"` + MaxConcurrent int `xml:"MaxConcurrent,omitempty"` + UpdateTime string `xml:"UpdateTime,omitempty"` + CreateTime string `xml:"CreateTime,omitempty"` + NotifyConfig *DocProcessQueueNotifyConfig `xml:"NotifyConfig,omitempty"` +} + +type DocProcessQueueNotifyConfig struct { + Url string `xml:"Url,omitempty"` + State string `xml:"State,omitempty"` + Type string `xml:"Type,omitempty"` + Event string `xml:"Event,omitempty"` +} + +// 查询文档预览队列 https://cloud.tencent.com/document/product/436/54055 +func (s *CIService) DescribeDocProcessQueues(ctx context.Context, opt *DescribeDocProcessQueuesOptions) (*DescribeDocProcessQueuesResult, *Response, error) { + var res DescribeDocProcessQueuesResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/docqueue", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +type UpdateDocProcessQueueOptions struct { + XMLName xml.Name `xml:"Request"` + Name string `xml:"Name,omitempty"` + QueueID string `xml:"QueueID,omitempty"` + State string `xml:"State,omitempty"` + NotifyConfig *DocProcessQueueNotifyConfig `xml:"NotifyConfig,omitempty"` +} + +type UpdateDocProcessQueueResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId"` + Queue *DocProcessQueue `xml:"Queue"` +} + +// 更新文档预览队列 https://cloud.tencent.com/document/product/436/54094 +func (s *CIService) UpdateDocProcessQueue(ctx context.Context, opt *UpdateDocProcessQueueOptions) (*UpdateDocProcessQueueResult, *Response, error) { + var res UpdateDocProcessQueueResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/docqueue/" + opt.QueueID, + body: opt, + method: http.MethodPut, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +type DescribeDocProcessBucketsOptions struct { + Regions string `url:"regions,omitempty"` + BucketNames string `url:"bucketNames,omitempty"` + BucketName string `url:"bucketName,omitempty"` + PageNumber int `url:"pageNumber,omitempty"` + PageSize int `url:"pageSize,omitempty"` +} + +type DescribeDocProcessBucketsResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + TotalCount int `xml:"TotalCount,omitempty"` + PageNumber int `xml:"PageNumber,omitempty"` + PageSize int `xml:"PageSize,omitempty"` + DocBucketList []DocProcessBucket `xml:"DocBucketList,omitempty"` +} +type DocProcessBucket struct { + BucketId string `xml:"BucketId,omitempty"` + Name string `xml:"Name,omitempty"` + Region string `xml:"Region,omitempty"` + 
+
+// DescribeDocProcessBuckets queries which buckets have document preview enabled. https://cloud.tencent.com/document/product/436/54057
+func (s *CIService) DescribeDocProcessBuckets(ctx context.Context, opt *DescribeDocProcessBucketsOptions) (*DescribeDocProcessBucketsResult, *Response, error) {
+ var res DescribeDocProcessBucketsResult
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.CIURL,
+  uri: "/docbucket",
+  optQuery: opt,
+  method: http.MethodGet,
+  result: &res,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return &res, resp, err
+}
+
+type DocPreviewOptions struct {
+ SrcType string `url:"srcType,omitempty"`
+ Page int `url:"page,omitempty"`
+ ImageParams string `url:"ImageParams,omitempty"`
+ Sheet int `url:"sheet,omitempty"`
+ DstType string `url:"dstType,omitempty"`
+ Password string `url:"password,omitempty"`
+ Comment int `url:"comment,omitempty"`
+ ExcelPaperDirection int `url:"excelPaperDirection,omitempty"`
+ Quality int `url:"quality,omitempty"`
+ Zoom int `url:"zoom,omitempty"`
+}
+
+// DocPreview is the synchronous document preview API. https://cloud.tencent.com/document/product/436/54058
+func (s *CIService) DocPreview(ctx context.Context, name string, opt *DocPreviewOptions) (*Response, error) {
+ sendOpt := sendOptions{
+  baseURL: s.client.BaseURL.BucketURL,
+  uri: "/" + encodeURIComponent(name) + "?ci-process=doc-preview",
+  optQuery: opt,
+  method: http.MethodGet,
+  disableCloseBody: true,
+ }
+ resp, err := s.client.send(ctx, &sendOpt)
+ return resp, err
+}
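+
+// Usage sketch (editorial example, not part of the vendored SDK): fetch the
+// first page of a document preview synchronously and save it; since
+// disableCloseBody is set, the caller must close the response body.
+//
+//  opt := &cos.DocPreviewOptions{Page: 1, SrcType: "docx"}
+//  resp, err := client.CI.DocPreview(context.Background(), "input.docx", opt)
+//  if err == nil {
+//   defer resp.Body.Close()
+//   fd, _ := os.Create("preview.jpg")
+//   io.Copy(fd, resp.Body)
+//   fd.Close()
+//  }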
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_media.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_media.go
new file mode 100644
index 0000000..5c38b51
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/ci_media.go
@@ -0,0 +1,2140 @@
+package cos
+
+import (
+    "context"
+    "encoding/xml"
+    "errors"
+    "fmt"
+    "net/http"
+    "strings"
+
+    "github.com/clbanning/mxj"
+    "github.com/mitchellh/mapstructure"
+)
+
+// JobInput TODO
+type JobInput struct {
+    Object string `xml:"Object,omitempty"`
+}
+
+// StreamExtract TODO
+type StreamExtract struct {
+    Index  string `xml:"Index,omitempty"`
+    Object string `xml:"Object,omitempty"`
+}
+
+// JobOutput TODO
+type JobOutput struct {
+    Region        string          `xml:"Region,omitempty"`
+    Bucket        string          `xml:"Bucket,omitempty"`
+    Object        string          `xml:"Object,omitempty"`
+    SpriteObject  string          `xml:"SpriteObject,omitempty"`
+    AuObject      string          `xml:"AuObject,omitempty"`
+    StreamExtract []StreamExtract `xml:"StreamExtract,omitempty"`
+}
+
+// ClipConfig TODO
+type ClipConfig struct {
+    Duration string `xml:"Duration"`
+}
+
+// Container TODO
+type Container struct {
+    Format     string     `xml:"Format"`
+    ClipConfig ClipConfig `xml:"ClipConfig"`
+}
+
+// Video TODO
+type Video struct {
+    Codec         string `xml:"Codec"`
+    Width         string `xml:"Width,omitempty"`
+    Height        string `xml:"Height,omitempty"`
+    Fps           string `xml:"Fps,omitempty"`
+    Remove        string `xml:"Remove,omitempty"`
+    Profile       string `xml:"Profile,omitempty"`
+    Bitrate       string `xml:"Bitrate,omitempty"`
+    Crf           string `xml:"Crf,omitempty"`
+    Gop           string `xml:"Gop,omitempty"`
+    Preset        string `xml:"Preset,omitempty"`
+    Bufsize       string `xml:"Bufsize,omitempty"`
+    Maxrate       string `xml:"Maxrate,omitempty"`
+    HlsTsTime     string `xml:"HlsTsTime,omitempty"`
+    DashSegment   string `xml:"DashSegment,omitempty"`
+    Pixfmt        string `xml:"Pixfmt,omitempty"`
+    LongShortMode string `xml:"LongShortMode,omitempty"`
+    Rotate        string `xml:"Rotate,omitempty"`
+}
+
+// TimeInterval TODO
+type TimeInterval struct {
+    Start    string `xml:"Start,omitempty"`
+    Duration string `xml:"Duration,omitempty"`
+}
+
+// Audio TODO
+type Audio struct {
+    Codec         string `xml:"Codec,omitempty"`
+    Samplerate    string `xml:"Samplerate,omitempty"`
+    Bitrate       string `xml:"Bitrate,omitempty"`
+    Channels      string `xml:"Channels,omitempty"`
+    Remove        string `xml:"Remove,omitempty"`
+    KeepTwoTracks string `xml:"KeepTwoTracks,omitempty"`
+    SwitchTrack   string `xml:"SwitchTrack,omitempty"`
+    SampleFormat  string `xml:"SampleFormat,omitempty"`
+}
+
+// TransConfig TODO
+type TransConfig struct {
+    AdjDarMethod          string      `xml:"AdjDarMethod,omitempty"`
+    IsCheckReso           string      `xml:"IsCheckReso,omitempty"`
+    ResoAdjMethod         string      `xml:"ResoAdjMethod,omitempty"`
+    IsCheckVideoBitrate   string      `xml:"IsCheckVideoBitrate,omitempty"`
+    VideoBitrateAdjMethod string      `xml:"VideoBitrateAdjMethod,omitempty"`
+    IsCheckAudioBitrate   string      `xml:"IsCheckAudioBitrate,omitempty"`
+    AudioBitrateAdjMethod string      `xml:"AudioBitrateAdjMethod,omitempty"`
+    DeleteMetadata        string      `xml:"DeleteMetadata,omitempty"`
+    IsHdr2Sdr             string      `xml:"IsHdr2Sdr,omitempty"`
+    HlsEncrypt            *HlsEncrypt `xml:"HlsEncrypt,omitempty"`
+}
+
+// Transcode TODO
+type Transcode struct {
+    Container    *Container    `xml:"Container,omitempty"`
+    Video        *Video        `xml:"Video,omitempty"`
+    TimeInterval *TimeInterval `xml:"TimeInterval,omitempty"`
+    Audio        *Audio        `xml:"Audio,omitempty"`
+    TransConfig  *TransConfig  `xml:"TransConfig,omitempty"`
+}
+
+// Image TODO
+type Image struct {
+    Url          string `xml:"Url,omitempty"`
+    Mode         string `xml:"Mode,omitempty"`
+    Width        string `xml:"Width,omitempty"`
+    Height       string `xml:"Height,omitempty"`
+    Transparency string `xml:"Transparency,omitempty"`
+    Background   string `xml:"Background,omitempty"`
+}
+
+// Text TODO
+type Text struct {
+    FontSize     string `xml:"FontSize,omitempty"`
+    FontType     string `xml:"FontType,omitempty"`
+    FontColor    string `xml:"FontColor,omitempty"`
+    Transparency string `xml:"Transparency,omitempty"`
+    Text         string `xml:"Text,omitempty"`
+}
+
+// Watermark TODO
+type Watermark struct {
+    Type      string `xml:"Type,omitempty"`
+    Pos       string `xml:"Pos,omitempty"` // TopLeft: top-left; Top: top-center; TopRight: top-right; Left: middle-left; Center: center; Right: middle-right; BottomLeft: bottom-left; Bottom: bottom-center; BottomRight: bottom-right
+    LocMode   string `xml:"LocMode,omitempty"`
+    Dx        string `xml:"Dx,omitempty"`
+    Dy        string `xml:"Dy,omitempty"`
+    StartTime string `xml:"StartTime,omitempty"`
+    EndTime   string `xml:"EndTime,omitempty"`
+    Image     *Image `xml:"Image,omitempty"`
+    Text      *Text  `xml:"Text,omitempty"`
+}
+
+// EffectConfig TODO
+type EffectConfig struct {
+    EnableStartFadein string `xml:"EnableStartFadein,omitempty"`
+    StartFadeinTime   string `xml:"StartFadeinTime,omitempty"`
+    EnableEndFadeout  string `xml:"EnableEndFadeout,omitempty"`
+    EndFadeoutTime    string `xml:"EndFadeoutTime,omitempty"`
+    EnableBgmFade     string `xml:"EnableBgmFade,omitempty"`
+    BgmFadeTime       string `xml:"BgmFadeTime,omitempty"`
+}
+
+// AudioMix TODO
+type AudioMix struct {
+    AudioSource  string        `xml:"AudioSource,omitempty"`
+    MixMode      string        `xml:"MixMode,omitempty"`
+    Replace      string        `xml:"Replace,omitempty"`
+    EffectConfig *EffectConfig `xml:"EffectConfig,omitempty"`
+}
+
+// ConcatFragment TODO
+type ConcatFragment struct {
+    Url       string `xml:"Url,omitempty"`
+    Mode      string `xml:"Mode,omitempty"`
+    StartTime string `xml:"StartTime,omitempty"`
+    EndTime   string `xml:"EndTime,omitempty"`
+}
+
+// ConcatTemplate TODO
+type ConcatTemplate struct {
+    ConcatFragment []ConcatFragment `xml:"ConcatFragment,omitempty"`
+    Audio          *Audio           `xml:"Audio,omitempty"`
+    Video          *Video           `xml:"Video,omitempty"`
+    Container      *Container       `xml:"Container,omitempty"`
+    Index          string           `xml:"Index,omitempty"`
+    AudioMix       *AudioMix        `xml:"AudioMix,omitempty"`
+}
+
+// SpriteSnapshotConfig TODO
+type SpriteSnapshotConfig struct {
+    CellHeight string `xml:"CellHeight,omitempty"`
+    CellWidth  string `xml:"CellWidth,omitempty"`
+    Color      string `xml:"Color,omitempty"`
+    Columns    string `xml:"Columns,omitempty"`
+    Lines      string `xml:"Lines,omitempty"`
+    Margin     string `xml:"Margin,omitempty"`
+    Padding    string `xml:"Padding,omitempty"`
+}
+
+// Snapshot TODO
+type Snapshot struct {
+    Mode                 string                `xml:"Mode,omitempty"`
+    Start                string                `xml:"Start,omitempty"`
+    TimeInterval         string                `xml:"TimeInterval,omitempty"`
+    Count                string                `xml:"Count,omitempty"`
+    Width                string                `xml:"Width,omitempty"`
+    Height               string                `xml:"Height,omitempty"`
+    CIParam              string                `xml:"CIParam,omitempty"`
+    IsCheckCount         bool                  `xml:"IsCheckCount,omitempty"`
+    IsCheckBlack         bool                  `xml:"IsCheckBlack,omitempty"`
+    BlackLevel           string                `xml:"BlackLevel,omitempty"`
+    PixelBlackThreshold  string                `xml:"PixelBlackThreshold,omitempty"`
+    SnapshotOutMode      string                `xml:"SnapshotOutMode,omitempty"`
+    SpriteSnapshotConfig *SpriteSnapshotConfig `xml:"SpriteSnapshotConfig,omitempty"`
+}
+
+// AnimationVideo is deliberately kept separate from the transcode Video type:
+// the two job kinds care about different parameters, and keeping them apart
+// avoids interference.
+type AnimationVideo struct {
+    Codec                      string `xml:"Codec"`
+    Width                      string `xml:"Width"`
+    Height                     string `xml:"Height"`
+    Fps                        string `xml:"Fps"`
+    AnimateOnlyKeepKeyFrame    string `xml:"AnimateOnlyKeepKeyFrame"`
+    AnimateTimeIntervalOfFrame string `xml:"AnimateTimeIntervalOfFrame"`
+    AnimateFramesPerSecond     string `xml:"AnimateFramesPerSecond"`
+    Quality                    string `xml:"Quality"`
+}
+
+// Animation TODO
+type Animation struct {
+    Container    *Container      `xml:"Container,omitempty"`
+    Video        *AnimationVideo `xml:"Video,omitempty"`
+    TimeInterval *TimeInterval   `xml:"TimeInterval,omitempty"`
+}
+
+// HlsEncrypt TODO
+type HlsEncrypt struct {
+    IsHlsEncrypt bool   `xml:"IsHlsEncrypt,omitempty"`
+    UriKey       string `xml:"UriKey,omitempty"`
+}
+
+// Segment TODO
+type Segment struct {
+    Format     string      `xml:"Format,omitempty"`
+    Duration   string      `xml:"Duration,omitempty"`
+    HlsEncrypt *HlsEncrypt `xml:"HlsEncrypt,omitempty"`
+}
+
+// VideoMontageVideo TODO
+type VideoMontageVideo struct {
+    Codec   string `xml:"Codec"`
+    Width   string `xml:"Width"`
+    Height  string `xml:"Height"`
+    Fps     string `xml:"Fps"`
+    Remove  string `xml:"Remove,omitempty"`
+    Bitrate string `xml:"Bitrate"`
+    Crf     string `xml:"Crf"`
+}
+
+// VideoMontage TODO
+type VideoMontage struct {
+    Container *Container         `xml:"Container,omitempty"`
+    Video     *VideoMontageVideo `xml:"Video,omitempty"`
+    Audio     *Audio             `xml:"Audio,omitempty"`
+    Duration  string             `xml:"Duration,omitempty"`
+    AudioMix  *AudioMix          `xml:"AudioMix,omitempty"`
+}
+
+// AudioConfig TODO
+type AudioConfig struct {
+    Codec      string `xml:"Codec"`
+    Samplerate string `xml:"Samplerate"`
+    Bitrate    string `xml:"Bitrate"`
+    Channels   string `xml:"Channels"`
+}
+
+// VoiceSeparate TODO
+type VoiceSeparate struct {
+    AudioMode   string       `xml:"AudioMode,omitempty"` // IsAudio: vocals only, IsBackground: background only, AudioAndBackground: vocals plus background
+    AudioConfig *AudioConfig `xml:"AudioConfig,omitempty"`
+}
+
+// ColorEnhance TODO
+type ColorEnhance struct {
+    Enable     string `xml:"Enable"`
+    Contrast   string `xml:"Contrast"`
+    Correction string `xml:"Correction"`
+    Saturation string `xml:"Saturation"`
+}
+
+// MsSharpen TODO
+type MsSharpen struct {
+    Enable       string `xml:"Enable"`
+    SharpenLevel string `xml:"SharpenLevel"`
+}
+
+// VideoProcess TODO
+type VideoProcess struct {
+    ColorEnhance *ColorEnhance
`xml:"ColorEnhance,omitempty"` + MsSharpen *MsSharpen `xml:"MsSharpen,omitempty"` +} + +// SDRtoHDR TODO +type SDRtoHDR struct { + HdrMode string `xml:"HdrMode,omitempty"` // HLG、HDR10 +} + +// SuperResolution TODO +type SuperResolution struct { + Resolution string `xml:"Resolution,omitempty"` // sdtohd、hdto4k + EnableScaleUp string `xml:"EnableScaleUp,omitempty"` + Version string `xml:"Version,omitempty"` +} + +// DigitalWatermark TODO +type DigitalWatermark struct { + Message string `xml:"Message"` + Type string `xml:"Type"` + Version string `xml:"Version"` +} + +// ExtractDigitalWatermark TODO +type ExtractDigitalWatermark struct { + Type string `xml:"Type"` + Version string `xml:"Version"` +} + +// VideoTag TODO +type VideoTag struct { + Scenario string `xml:"Scenario"` +} + +// MediaResult TODO +type MediaResult struct { + OutputFile struct { + Bucket string `xml:"Bucket,omitempty"` + Md5Info []struct { + Md5 string `xml:"Md5,omitempty"` + ObjectName string `xml:"ObjectName,omitempty"` + } `xml:"Md5Info,omitempty"` + ObjectName []string `xml:"ObjectName,omitempty"` + ObjectPrefix string `xml:"ObjectPrefix,omitempty"` + Region string `xml:"Region,omitempty"` + SpriteOutputFile struct { + Bucket string `xml:"Bucket,omitempty"` + Md5Info []struct { + Md5 string `xml:"Md5,omitempty"` + ObjectName string `xml:"ObjectName,omitempty"` + } `xml:"Md5Info,omitempty"` + ObjectName []string `xml:"ObjectName,omitempty"` + ObjectPrefix string `xml:"ObjectPrefix,omitempty"` + Region string `xml:"Region,omitempty"` + } `xml:"SpriteOutputFile,omitempty"` + } `xml:"OutputFile,omitempty"` +} + +// MediaInfo TODO +type MediaInfo struct { + Format struct { + Bitrate string `xml:"Bitrate"` + Duration string `xml:"Duration"` + FormatLongName string `xml:"FormatLongName"` + FormatName string `xml:"FormatName"` + NumProgram string `xml:"NumProgram"` + NumStream string `xml:"NumStream"` + Size string `xml:"Size"` + StartTime string `xml:"StartTime"` + } `xml:"Format"` + Stream struct { + Audio []struct { + Bitrate string `xml:"Bitrate"` + Channel string `xml:"Channel"` + ChannelLayout string `xml:"ChannelLayout"` + CodecLongName string `xml:"CodecLongName"` + CodecName string `xml:"CodecName"` + CodecTag string `xml:"CodecTag"` + CodecTagString string `xml:"CodecTagString"` + CodecTimeBase string `xml:"CodecTimeBase"` + Duration string `xml:"Duration"` + Index string `xml:"Index"` + Language string `xml:"Language"` + SampleFmt string `xml:"SampleFmt"` + SampleRate string `xml:"SampleRate"` + StartTime string `xml:"StartTime"` + Timebase string `xml:"Timebase"` + } `xml:"Audio"` + Subtitle string `xml:"Subtitle"` + Video []struct { + AvgFps string `xml:"AvgFps"` + Bitrate string `xml:"Bitrate"` + CodecLongName string `xml:"CodecLongName"` + CodecName string `xml:"CodecName"` + CodecTag string `xml:"CodecTag"` + CodecTagString string `xml:"CodecTagString"` + CodecTimeBase string `xml:"CodecTimeBase"` + Dar string `xml:"Dar"` + Duration string `xml:"Duration"` + Fps string `xml:"Fps"` + HasBFrame string `xml:"HasBFrame"` + Height string `xml:"Height"` + Index string `xml:"Index"` + Language string `xml:"Language"` + Level string `xml:"Level"` + NumFrames string `xml:"NumFrames"` + PixFormat string `xml:"PixFormat"` + Profile string `xml:"Profile"` + RefFrames string `xml:"RefFrames"` + Rotation string `xml:"Rotation"` + Sar string `xml:"Sar"` + StartTime string `xml:"StartTime"` + Timebase string `xml:"Timebase"` + Width string `xml:"Width"` + ColorRange string `xml:"ColorRange"` + ColorTransfer string 
`xml:"ColorTransfer"` + ColorPrimaries string `xml:"ColorPrimaries"` + } `xml:"Video"` + } `xml:"Stream"` +} + +// PicProcess TODO +type PicProcess struct { + IsPicInfo string `xml:"IsPicInfo,omitempty"` + ProcessRule string `xml:"ProcessRule,omitempty"` +} + +// PicProcessResult TODO +type PicProcessResult struct { + UploadResult struct { + OriginalInfo struct { + Key string `xml:"Key"` + Location string `xml:"Location"` + ETag string `xml:"ETag"` + ImageInfo struct { + Format string `xml:"Format"` + Width int32 `xml:"Width"` + Height int32 `xml:"Height"` + Quality int32 `xml:"Quality"` + Ave string `xml:"Ave"` + Orientation int32 `xml:"Orientation"` + } `xml:"ImageInfo"` + } `xml:"OriginalInfo"` + ProcessResults struct { + Object struct { + Key string `xml:"Key"` + Location string `xml:"Location"` + Format string `xml:"Format"` + Width int32 `xml:"Width"` + Height int32 `xml:"Height"` + Size int32 `xml:"Size"` + Quality int32 `xml:"Quality"` + Etag string `xml:"Etag"` + } `xml:"Object"` + } `xml:"ProcessResults"` + } `xml:"UploadResult"` +} + +// PicProcessJobOperation TODO +type PicProcessJobOperation struct { + PicProcess *PicProcess `xml:"PicProcess,omitempty"` + PicProcessResult *PicProcessResult `xml:"PicProcessResult,omitempty"` + Output *JobOutput `xml:"Output,omitempty"` +} + +// MediaProcessJobOperation TODO +type MediaProcessJobOperation struct { + Tag string `xml:"Tag,omitempty"` + Output *JobOutput `xml:"Output,omitempty"` + MediaResult *MediaResult `xml:"MediaResult,omitempty"` + MediaInfo *MediaInfo `xml:"MediaInfo,omitempty"` + Transcode *Transcode `xml:"Transcode,omitempty"` + Watermark []Watermark `xml:"Watermark,omitempty"` + TemplateId string `xml:"TemplateId,omitempty"` + WatermarkTemplateId []string `xml:"WatermarkTemplateId,omitempty"` + ConcatTemplate *ConcatTemplate `xml:"ConcatTemplate,omitempty"` + Snapshot *Snapshot `xml:"Snapshot,omitempty"` + Animation *Animation `xml:"Animation,omitempty"` + Segment *Segment `xml:"Segment,omitempty"` + VideoMontage *VideoMontage `xml:"VideoMontage,omitempty"` + VoiceSeparate *VoiceSeparate `xml:"VoiceSeparate,omitempty"` + VideoProcess *VideoProcess `xml:"VideoProcess,omitempty"` + TranscodeTemplateId string `xml:"TranscodeTemplateId,omitempty"` // 视频增强、超分、SDRtoHDR任务类型,可以选择转码模板相关参数 + SDRtoHDR *SDRtoHDR `xml:"SDRtoHDR,omitempty"` + SuperResolution *SuperResolution `xml:"SuperResolution,omitempty"` + DigitalWatermark *DigitalWatermark `xml:"DigitalWatermark,omitempty"` + ExtractDigitalWatermark *ExtractDigitalWatermark `xml:"ExtractDigitalWatermark,omitempty"` + VideoTag *VideoTag `xml:"VideoTag,omitempty"` + UserData string `xml:"UserData,omitempty"` +} + +// CreatePicJobsOptions TODO +type CreatePicJobsOptions struct { + XMLName xml.Name `xml:"Request"` + Tag string `xml:"Tag,omitempty"` + Input *JobInput `xml:"Input,omitempty"` + Operation *PicProcessJobOperation `xml:"Operation,omitempty"` + QueueId string `xml:"QueueId,omitempty"` + CallBack string `xml:"CallBack,omitempty"` +} + +// CreateMediaJobsOptions TODO +type CreateMediaJobsOptions struct { + XMLName xml.Name `xml:"Request"` + Tag string `xml:"Tag,omitempty"` + Input *JobInput `xml:"Input,omitempty"` + Operation *MediaProcessJobOperation `xml:"Operation,omitempty"` + QueueId string `xml:"QueueId,omitempty"` + CallBack string `xml:"CallBack,omitempty"` +} + +// MediaProcessJobDetail TODO +type MediaProcessJobDetail struct { + Code string `xml:"Code,omitempty"` + Message string `xml:"Message,omitempty"` + JobId string `xml:"JobId,omitempty"` + Tag string 
`xml:"Tag,omitempty"` + Progress string `xml:"Progress,omitempty"` + State string `xml:"State,omitempty"` + CreationTime string `xml:"CreationTime,omitempty"` + StartTime string `xml:"StartTime,omitempty"` + EndTime string `xml:"EndTime,omitempty"` + QueueId string `xml:"QueueId,omitempty"` + Input *JobInput `xml:"Input,omitempty"` + Operation *MediaProcessJobOperation `xml:"Operation,omitempty"` +} + +// CreatePicJobsResult TODO +type CreatePicJobsResult CreateMediaJobsResult + +// CreateMediaJobsResult TODO +type CreateMediaJobsResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail *MediaProcessJobDetail `xml:"JobsDetail,omitempty"` +} + +// CreateMultiMediaJobsOptions TODO +type CreateMultiMediaJobsOptions struct { + XMLName xml.Name `xml:"Request"` + Tag string `xml:"Tag,omitempty"` + Input *JobInput `xml:"Input,omitempty"` + Operation []MediaProcessJobOperation `xml:"Operation,omitempty"` + QueueId string `xml:"QueueId,omitempty"` + CallBack string `xml:"CallBack,omitempty"` +} + +// CreateMultiMediaJobsResult TODO +type CreateMultiMediaJobsResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail []MediaProcessJobDetail `xml:"JobsDetail,omitempty"` +} + +// MediaProcessJobsNotifyBody TODO +type MediaProcessJobsNotifyBody struct { + XMLName xml.Name `xml:"Response"` + EventName string `xml:"EventName"` + JobsDetail *MediaProcessJobDetail `xml:"JobsDetail,omitempty"` +} + +// WorkflowExecutionNotifyBody TODO +type WorkflowExecutionNotifyBody struct { + XMLName xml.Name `xml:"Response"` + EventName string `xml:"EventName"` + WorkflowExecution struct { + RunId string `xml:"RunId"` + BucketId string `xml:"BucketId"` + Object string `xml:"Object"` + CosHeaders []struct { + Key string `xml:"Key"` + Value string `xml:"Value"` + } `xml:"CosHeaders"` + WorkflowId string `xml:"WorkflowId"` + WorkflowName string `xml:"WorkflowName"` + CreateTime string `xml:"CreateTime"` + State string `xml:"State"` + Tasks []struct { + Type string `xml:"Type"` + CreateTime string `xml:"CreateTime"` + EndTime string `xml:"EndTime"` + State string `xml:"State"` + JobId string `xml:"JobId"` + Name string `xml:"Name"` + TemplateId string `xml:"TemplateId"` + TemplateName string `xml:"TemplateName"` + TranscodeTemplateId string `xml:"TranscodeTemplateId,omitempty"` + TranscodeTemplateName string `xml:"TranscodeTemplateName,omitempty"` + HdrMode string `xml:"HdrMode,omitempty"` + } `xml:"Tasks"` + } `xml:"WorkflowExecution"` +} + +// CreateMultiMediaJobs TODO +func (s *CIService) CreateMultiMediaJobs(ctx context.Context, opt *CreateMultiMediaJobsOptions) (*CreateMultiMediaJobsResult, *Response, error) { + var res CreateMultiMediaJobsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/jobs", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// CreateMediaJobs TODO +func (s *CIService) CreateMediaJobs(ctx context.Context, opt *CreateMediaJobsOptions) (*CreateMediaJobsResult, *Response, error) { + var res CreateMediaJobsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/jobs", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// CreatePicProcessJobs TODO +func (s *CIService) CreatePicProcessJobs(ctx context.Context, opt *CreatePicJobsOptions) (*CreatePicJobsResult, *Response, error) { + var res CreatePicJobsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: 
"/pic_jobs", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribePicProcessJobResult TODO +type DescribePicProcessJobResult DescribeMediaProcessJobResult + +// DescribeMediaProcessJobResult TODO +type DescribeMediaProcessJobResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail *MediaProcessJobDetail `xml:"JobsDetail,omitempty"` + NonExistJobIds string `xml:"NonExistJobIds,omitempty"` +} + +// DescribeMediaJob TODO +func (s *CIService) DescribeMediaJob(ctx context.Context, jobid string) (*DescribeMediaProcessJobResult, *Response, error) { + var res DescribeMediaProcessJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/jobs/" + jobid, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribePicProcessJob TODO +func (s *CIService) DescribePicProcessJob(ctx context.Context, jobid string) (*DescribePicProcessJobResult, *Response, error) { + var res DescribePicProcessJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/pic_jobs/" + jobid, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribeMutilMediaProcessJobResult TODO +type DescribeMutilMediaProcessJobResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail []MediaProcessJobDetail `xml:"JobsDetail,omitempty"` + NonExistJobIds []string `xml:"NonExistJobIds,omitempty"` +} + +// DescribeMultiMediaJob TODO +func (s *CIService) DescribeMultiMediaJob(ctx context.Context, jobids []string) (*DescribeMutilMediaProcessJobResult, *Response, error) { + jobidsStr := "" + if len(jobids) < 1 { + return nil, nil, errors.New("empty param jobids") + } else { + jobidsStr = strings.Join(jobids, ",") + } + + var res DescribeMutilMediaProcessJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/jobs/" + jobidsStr, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribeMediaJobsOptions TODO +type DescribeMediaJobsOptions struct { + QueueId string `url:"queueId,omitempty"` + Tag string `url:"tag,omitempty"` + OrderByTime string `url:"orderByTime,omitempty"` + NextToken string `url:"nextToken,omitempty"` + Size int `url:"size,omitempty"` + States string `url:"states,omitempty"` + StartCreationTime string `url:"startCreationTime,omitempty"` + EndCreationTime string `url:"endCreationTime,omitempty"` +} + +// DescribeMediaJobsResult TODO +type DescribeMediaJobsResult struct { + XMLName xml.Name `xml:"Response"` + JobsDetail []MediaProcessJobDetail `xml:"JobsDetail,omitempty"` + NextToken string `xml:"NextToken,omitempty"` +} + +// DescribeMediaJobs TODO +// https://cloud.tencent.com/document/product/460/48235 +func (s *CIService) DescribeMediaJobs(ctx context.Context, opt *DescribeMediaJobsOptions) (*DescribeMediaJobsResult, *Response, error) { + var res DescribeMediaJobsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/jobs", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribePicProcessQueuesOptions TODO +type DescribePicProcessQueuesOptions DescribeMediaProcessQueuesOptions + +// DescribeMediaProcessQueuesOptions TODO +type DescribeMediaProcessQueuesOptions struct { + QueueIds string `url:"queueIds,omitempty"` + State string 
`url:"state,omitempty"` + PageNumber int `url:"pageNumber,omitempty"` + PageSize int `url:"pageSize,omitempty"` +} + +// DescribePicProcessQueuesResult TODO +type DescribePicProcessQueuesResult DescribeMediaProcessQueuesResult + +// DescribeMediaProcessQueuesResult TODO +type DescribeMediaProcessQueuesResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + TotalCount int `xml:"TotalCount,omitempty"` + PageNumber int `xml:"PageNumber,omitempty"` + PageSize int `xml:"PageSize,omitempty"` + QueueList []MediaProcessQueue `xml:"QueueList,omitempty"` + NonExistPIDs []string `xml:"NonExistPIDs,omitempty"` +} + +// MediaProcessQueue TODO +type MediaProcessQueue struct { + QueueId string `xml:"QueueId,omitempty"` + Name string `xml:"Name,omitempty"` + State string `xml:"State,omitempty"` + MaxSize int `xml:"MaxSize,omitempty"` + MaxConcurrent int `xml:"MaxConcurrent,omitempty"` + UpdateTime string `xml:"UpdateTime,omitempty"` + CreateTime string `xml:"CreateTime,omitempty"` + NotifyConfig *MediaProcessQueueNotifyConfig `xml:"NotifyConfig,omitempty"` +} + +// MediaProcessQueueNotifyConfig TODO +type MediaProcessQueueNotifyConfig struct { + Url string `xml:"Url,omitempty"` + State string `xml:"State,omitempty"` + Type string `xml:"Type,omitempty"` + Event string `xml:"Event,omitempty"` +} + +// DescribeMediaProcessQueues TODO +func (s *CIService) DescribeMediaProcessQueues(ctx context.Context, opt *DescribeMediaProcessQueuesOptions) (*DescribeMediaProcessQueuesResult, *Response, error) { + var res DescribeMediaProcessQueuesResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/queue", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribePicProcessQueues TODO +func (s *CIService) DescribePicProcessQueues(ctx context.Context, opt *DescribePicProcessQueuesOptions) (*DescribePicProcessQueuesResult, *Response, error) { + var res DescribePicProcessQueuesResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/picqueue", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// UpdateMediaProcessQueueOptions TODO +type UpdateMediaProcessQueueOptions struct { + XMLName xml.Name `xml:"Request"` + Name string `xml:"Name,omitempty"` + QueueID string `xml:"QueueID,omitempty"` + State string `xml:"State,omitempty"` + NotifyConfig *MediaProcessQueueNotifyConfig `xml:"NotifyConfig,omitempty"` +} + +// UpdateMediaProcessQueueResult TODO +type UpdateMediaProcessQueueResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId"` + Queue *MediaProcessQueue `xml:"Queue"` +} + +// UpdateMediaProcessQueue TODO +func (s *CIService) UpdateMediaProcessQueue(ctx context.Context, opt *UpdateMediaProcessQueueOptions) (*UpdateMediaProcessQueueResult, *Response, error) { + var res UpdateMediaProcessQueueResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/queue/" + opt.QueueID, + body: opt, + method: http.MethodPut, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribeMediaProcessBucketsOptions TODO +type DescribeMediaProcessBucketsOptions struct { + Regions string `url:"regions,omitempty"` + BucketNames string `url:"bucketNames,omitempty"` + BucketName string `url:"bucketName,omitempty"` + PageNumber int `url:"pageNumber,omitempty"` + PageSize int 
`url:"pageSize,omitempty"` +} + +// DescribeMediaProcessBucketsResult TODO +type DescribeMediaProcessBucketsResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + TotalCount int `xml:"TotalCount,omitempty"` + PageNumber int `xml:"PageNumber,omitempty"` + PageSize int `xml:"PageSize,omitempty"` + MediaBucketList []MediaProcessBucket `xml:"MediaBucketList,omitempty"` +} + +// MediaProcessBucket TODO +type MediaProcessBucket struct { + BucketId string `xml:"BucketId,omitempty"` + Region string `xml:"Region,omitempty"` + CreateTime string `xml:"CreateTime,omitempty"` +} + +// DescribeMediaProcessBuckets TODO +// 媒体bucket接口 https://cloud.tencent.com/document/product/436/48988 +func (s *CIService) DescribeMediaProcessBuckets(ctx context.Context, opt *DescribeMediaProcessBucketsOptions) (*DescribeMediaProcessBucketsResult, *Response, error) { + var res DescribeMediaProcessBucketsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/mediabucket", + optQuery: opt, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// GetMediaInfoResult TODO +type GetMediaInfoResult struct { + XMLName xml.Name `xml:"Response"` + MediaInfo *MediaInfo `xml:"MediaInfo"` +} + +// GetMediaInfo TODO +// 媒体信息接口 https://cloud.tencent.com/document/product/436/55672 +func (s *CIService) GetMediaInfo(ctx context.Context, name string, opt *ObjectGetOptions, id ...string) (*GetMediaInfoResult, *Response, error) { + var u string + if len(id) == 1 { + u = fmt.Sprintf("/%s?versionId=%s&ci-process=videoinfo", encodeURIComponent(name), id[0]) + } else if len(id) == 0 { + u = fmt.Sprintf("/%s?ci-process=videoinfo", encodeURIComponent(name)) + } else { + return nil, nil, fmt.Errorf("wrong params") + } + + var res GetMediaInfoResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodGet, + optQuery: opt, + optHeader: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// GenerateMediaInfoOptions TODO +type GenerateMediaInfoOptions struct { + XMLName xml.Name `xml:"Request"` + Input *JobInput `xml:"Input,omitempty"` +} + +// GenerateMediaInfo TODO +// 生成媒体信息接口,支持大文件,耗时较大请求 +func (s *CIService) GenerateMediaInfo(ctx context.Context, opt *GenerateMediaInfoOptions) (*GetMediaInfoResult, *Response, error) { + + var res GetMediaInfoResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/mediainfo", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// GetSnapshotOptions TODO +type GetSnapshotOptions struct { + Time float32 `url:"time,omitempty"` + Height int `url:"height,omitempty"` + Width int `url:"width,omitempty"` + Format string `url:"format,omitempty"` + Rotate string `url:"rotate,omitempty"` + Mode string `url:"mode,omitempty"` +} + +// GetSnapshot TODO +// 媒体截图接口 https://cloud.tencent.com/document/product/436/55671 +func (s *CIService) GetSnapshot(ctx context.Context, name string, opt *GetSnapshotOptions, id ...string) (*Response, error) { + var u string + if len(id) == 1 { + u = fmt.Sprintf("/%s?versionId=%s&ci-process=snapshot", encodeURIComponent(name), id[0]) + } else if len(id) == 0 { + u = fmt.Sprintf("/%s?ci-process=snapshot", encodeURIComponent(name)) + } else { + return nil, fmt.Errorf("wrong params") + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: 
+
+// GenerateMediaInfoOptions TODO
+type GenerateMediaInfoOptions struct {
+    XMLName xml.Name  `xml:"Request"`
+    Input   *JobInput `xml:"Input,omitempty"`
+}
+
+// GenerateMediaInfo generates media info asynchronously; it also works for
+// large files whose synchronous inspection would take too long.
+func (s *CIService) GenerateMediaInfo(ctx context.Context, opt *GenerateMediaInfoOptions) (*GetMediaInfoResult, *Response, error) {
+    var res GetMediaInfoResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/mediainfo",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// GetSnapshotOptions TODO
+type GetSnapshotOptions struct {
+    Time   float32 `url:"time,omitempty"`
+    Height int     `url:"height,omitempty"`
+    Width  int     `url:"width,omitempty"`
+    Format string  `url:"format,omitempty"`
+    Rotate string  `url:"rotate,omitempty"`
+    Mode   string  `url:"mode,omitempty"`
+}
+
+// GetSnapshot is the media-snapshot API.
+// https://cloud.tencent.com/document/product/436/55671
+func (s *CIService) GetSnapshot(ctx context.Context, name string, opt *GetSnapshotOptions, id ...string) (*Response, error) {
+    var u string
+    if len(id) == 1 {
+        u = fmt.Sprintf("/%s?versionId=%s&ci-process=snapshot", encodeURIComponent(name), id[0])
+    } else if len(id) == 0 {
+        u = fmt.Sprintf("/%s?ci-process=snapshot", encodeURIComponent(name))
+    } else {
+        return nil, fmt.Errorf("wrong params")
+    }
+
+    sendOpt := sendOptions{
+        baseURL:          s.client.BaseURL.BucketURL,
+        uri:              u,
+        method:           http.MethodGet,
+        optQuery:         opt,
+        disableCloseBody: true,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return resp, err
+}
+
+// GetPrivateM3U8Options TODO
+type GetPrivateM3U8Options struct {
+    Expires int `url:"expires"`
+}
+
+// GetPrivateM3U8 fetches a private m3u8 playlist.
+// https://cloud.tencent.com/document/product/460/63738
+func (s *CIService) GetPrivateM3U8(ctx context.Context, name string, opt *GetPrivateM3U8Options, id ...string) (*Response, error) {
+    var u string
+    if len(id) == 1 {
+        u = fmt.Sprintf("/%s?versionId=%s&ci-process=pm3u8", encodeURIComponent(name), id[0])
+    } else if len(id) == 0 {
+        u = fmt.Sprintf("/%s?ci-process=pm3u8", encodeURIComponent(name))
+    } else {
+        return nil, fmt.Errorf("wrong params")
+    }
+
+    sendOpt := sendOptions{
+        baseURL:          s.client.BaseURL.BucketURL,
+        uri:              u,
+        method:           http.MethodGet,
+        optQuery:         opt,
+        disableCloseBody: true,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return resp, err
+}
+
+// TriggerWorkflowOptions TODO
+type TriggerWorkflowOptions struct {
+    WorkflowId string `url:"workflowId"`
+    Object     string `url:"object"`
+}
+
+// TriggerWorkflowResult TODO
+type TriggerWorkflowResult struct {
+    XMLName    xml.Name `xml:"Response"`
+    InstanceId string   `xml:"InstanceId"`
+    RequestId  string   `xml:"RequestId"`
+}
+
+// TriggerWorkflow triggers a workflow for a single file.
+// https://cloud.tencent.com/document/product/460/54640
+func (s *CIService) TriggerWorkflow(ctx context.Context, opt *TriggerWorkflowOptions) (*TriggerWorkflowResult, *Response, error) {
+    var res TriggerWorkflowResult
+    sendOpt := sendOptions{
+        baseURL:  s.client.BaseURL.CIURL,
+        uri:      "/triggerworkflow",
+        optQuery: opt,
+        method:   http.MethodPost,
+        result:   &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// DescribeWorkflowExecutionsOptions TODO
+type DescribeWorkflowExecutionsOptions struct {
+    WorkflowId        string `url:"workflowId,omitempty"`
+    Name              string `url:"Name,omitempty"`
+    OrderByTime       string `url:"orderByTime,omitempty"`
+    NextToken         string `url:"nextToken,omitempty"`
+    Size              int    `url:"size,omitempty"`
+    States            string `url:"states,omitempty"`
+    StartCreationTime string `url:"startCreationTime,omitempty"`
+    EndCreationTime   string `url:"endCreationTime,omitempty"`
+}
+
+// WorkflowExecutionList TODO
+type WorkflowExecutionList struct {
+    RunId        string `xml:"RunId,omitempty"`
+    WorkflowId   string `xml:"WorkflowId,omitempty"`
+    State        string `xml:"State,omitempty"`
+    CreationTime string `xml:"CreationTime,omitempty"`
+    Object       string `xml:"Object,omitempty"`
+}
+
+// DescribeWorkflowExecutionsResult TODO
+type DescribeWorkflowExecutionsResult struct {
+    XMLName               xml.Name                `xml:"Response"`
+    WorkflowExecutionList []WorkflowExecutionList `xml:"WorkflowExecutionList,omitempty"`
+    NextToken             string                  `xml:"NextToken,omitempty"`
+}
+
+// DescribeWorkflowExecutions lists workflow execution instances.
+// https://cloud.tencent.com/document/product/460/45950
+func (s *CIService) DescribeWorkflowExecutions(ctx context.Context, opt *DescribeWorkflowExecutionsOptions) (*DescribeWorkflowExecutionsResult, *Response, error) {
+    var res DescribeWorkflowExecutionsResult
+    sendOpt := sendOptions{
+        baseURL:  s.client.BaseURL.CIURL,
+        uri:      "/workflowexecution",
+        optQuery: opt,
+        method:   http.MethodGet,
+        result:   &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// NotifyConfig TODO
+type NotifyConfig struct {
+    URL          string `xml:"Url,omitempty"`
+    Event        string `xml:"Event,omitempty"`
+    Type         string `xml:"Type,omitempty"`
+    ResultFormat string `xml:"ResultFormat,omitempty"`
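+    // Url is the callback address; the value sets for Type, Event and
+    // ResultFormat are defined by the CI workflow-callback documentation
+    // (see the links above), which this struct only mirrors.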
+} + +// ExtFilter TODO +type ExtFilter struct { + State string `xml:"State,omitempty"` + Audio string `xml:"Audio,omitempty"` + Custom string `xml:"Custom,omitempty"` + CustomExts string `xml:"CustomExts,omitempty"` + AllFile string `xml:"AllFile,omitempty"` +} + +// NodeInput TODO +type NodeInput struct { + QueueId string `xml:"QueueId,omitempty"` + ObjectPrefix string `xml:"ObjectPrefix,omitempty"` + NotifyConfig *NotifyConfig `xml:"NotifyConfig,omitempty" json:"NotifyConfig,omitempty"` + ExtFilter *ExtFilter `xml:"ExtFilter,omitempty" json:"ExtFilter,omitempty"` +} + +// NodeOutput TODO +type NodeOutput struct { + Region string `xml:"Region,omitempty"` + Bucket string `xml:"Bucket,omitempty"` + Object string `xml:"Object,omitempty"` + AuObject string `xml:"AuObject,omitempty"` + SpriteObject string `xml:"SpriteObject,omitempty"` +} + +// DelogoParam TODO +type DelogoParam struct { + Switch string `xml:"Switch,omitempty"` + Dx string `xml:"Dx,omitempty"` + Dy string `xml:"Dy,omitempty"` + Width string `xml:"Width,omitempty"` + Height string `xml:"Height,omitempty"` +} + +// NodeSDRtoHDR TODO +type NodeSDRtoHDR struct { + HdrMode string `xml:"HdrMode,omitempty"` +} + +// NodeSCF TODO +type NodeSCF struct { + Region string `xml:"Region,omitempty"` + FunctionName string `xml:"FunctionName,omitempty"` + Namespace string `xml:"Namespace,omitempty"` +} + +// VideoStreamConfig TODO +type VideoStreamConfig struct { + VideoStreamName string `xml:"VideoStreamName,omitempty"` + BandWidth string `xml:"BandWidth,omitempty"` +} + +// NodeHlsPackInfo TODO +type NodeHlsPackInfo struct { + VideoStreamConfig []VideoStreamConfig `xml:"VideoStreamConfig,omitempty"` +} + +// NodeSmartCover TODO +type NodeSmartCover struct { + Format string `xml:"Format,omitempty"` + Width string `xml:"Width,omitempty"` + Height string `xml:"Height,omitempty"` + Count string `xml:"Count,omitempty"` + DeleteDuplicates string `xml:"DeleteDuplicates,omitempty"` +} + +// NodeSegmentConfig TODO +type NodeSegmentConfig struct { + Format string `xml:"Format,omitempty"` + Duration string `xml:"Duration,omitempty"` +} + +// NodeStreamPackConfigInfo TODO +type NodeStreamPackConfigInfo struct { + PackType string `xml:"PackType,omitempty"` + IgnoreFailedStream bool `xml:"IgnoreFailedStream,omitempty"` + ReserveAllStreamNode string `xml:"ReserveAllStreamNode,omitempty"` +} + +// NodeOperation TODO +type NodeOperation struct { + TemplateId string `xml:"TemplateId,omitempty" json:"TemplateId,omitempty"` + Output *NodeOutput `xml:"Output,omitempty" json:"Output,omitempty"` + WatermarkTemplateId []string `xml:"WatermarkTemplateId,omitempty" json:"WatermarkTemplateId,omitempty"` + DelogoParam *DelogoParam `xml:"DelogoParam,omitempty" json:"DelogoParam,omitempty"` + SDRtoHDR *NodeSDRtoHDR `xml:"SDRtoHDR,omitempty" json:"SDRtoHDR,omitempty"` + SCF *NodeSCF `xml:"SCF,omitempty" json:"SCF,omitempty"` + HlsPackInfo *NodeHlsPackInfo `xml:"HlsPackInfo,omitempty" json:"HlsPackInfo,omitempty"` + TranscodeTemplateId string `xml:"TranscodeTemplateId,omitempty" json:"TranscodeTemplateId,omitempty"` + SmartCover *NodeSmartCover `xml:"SmartCover,omitempty" json:"SmartCover,omitempty"` + SegmentConfig *NodeSegmentConfig `xml:"SegmentConfig,omitempty" json:"SegmentConfig,omitempty"` + DigitalWatermark *DigitalWatermark `xml:"DigitalWatermark,omitempty" json:"DigitalWatermark,omitempty"` + StreamPackConfigInfo *NodeStreamPackConfigInfo `xml:"StreamPackConfigInfo,omitempty" json:"StreamPackConfigInfo,omitempty"` + StreamPackInfo *NodeHlsPackInfo 
`xml:"StreamPackInfo,omitempty" json:"StreamPackInfo,omitempty"` +} + +// Node TODO +type Node struct { + Type string `xml:"Type"` + Input *NodeInput `xml:"Input,omitempty" json:"Input,omitempty"` + Operation *NodeOperation `xml:"Operation,omitempty" json:"Operation,omitempty"` +} + +// Topology TODO +type Topology struct { + Dependencies map[string]string `json:"Dependencies,omitempty"` + Nodes map[string]Node `json:"Nodes,omitempty"` +} + +// UnmarshalXML TODO +func (m *Topology) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var v struct { + XMLName xml.Name //`xml:"Topology"` + Dependencies struct { + Inner []byte `xml:",innerxml"` + } `xml:"Dependencies"` + Nodes struct { + Inner []byte `xml:",innerxml"` + } `xml:"Nodes"` + } + err := d.DecodeElement(&v, &start) + if err != nil { + return err + } + + myMap := make(map[string]interface{}) + + // ... do the mxj magic here ... - + + temp := v.Nodes.Inner + + prefix := "" + postfix := "" + str := prefix + string(temp) + postfix + //fmt.Println(str) + myMxjMap, _ := mxj.NewMapXml([]byte(str)) + myMap, _ = myMxjMap["Nodes"].(map[string]interface{}) + nodesMap := make(map[string]Node) + + for k, v := range myMap { + var node Node + mapstructure.Decode(v, &node) + nodesMap[k] = node + } + + // fill myMap + m.Nodes = nodesMap + + deps := make(map[string]interface{}) + + tep := "" + string(v.Dependencies.Inner) + "" + tepMxjMap, _ := mxj.NewMapXml([]byte(tep)) + deps, _ = tepMxjMap["Dependencies"].(map[string]interface{}) + depsString := make(map[string]string) + for k, v := range deps { + depsString[k] = v.(string) + } + m.Dependencies = depsString + return nil +} + +// WorkflowExecution TODO +type WorkflowExecution struct { + RunId string `xml:"RunId,omitempty" json:"RunId,omitempty"` + WorkflowId string `xml:"WorkflowId,omitempty" json:"WorkflowId,omitempty"` + WorkflowName string `xml:"WorkflowName,omitempty" json:"WorkflowName,omitempty"` + State string `xml:"State,omitempty" json:"State,omitempty"` + CreateTime string `xml:"CreateTime,omitempty" json:"CreateTime,omitempty"` + Object string `xml:"Object,omitempty" json:"Object,omitempty"` + Topology Topology `xml:"Topology,omitempty" json:"Topology,omitempty"` +} + +// DescribeWorkflowExecutionResult TODO +type DescribeWorkflowExecutionResult struct { + XMLName xml.Name `xml:"Response"` + WorkflowExecution []WorkflowExecution `xml:"WorkflowExecution,omitempty" json:"WorkflowExecution,omitempty"` + NextToken string `xml:"NextToken,omitempty" json:"NextToken,omitempty"` +} + +// DescribeWorkflowExecution TODO +// 获取工作流实例详情 https://cloud.tencent.com/document/product/460/45949 +func (s *CIService) DescribeWorkflowExecution(ctx context.Context, runId string) (*DescribeWorkflowExecutionResult, *Response, error) { + var res DescribeWorkflowExecutionResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/workflowexecution/" + runId, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// SpeechRecognition TODO +type SpeechRecognition struct { + ChannelNum string `xml:"ChannelNum,omitempty"` + ConvertNumMode string `xml:"ConvertNumMode,omitempty"` + EngineModelType string `xml:"EngineModelType,omitempty"` + FilterDirty string `xml:"FilterDirty,omitempty"` + FilterModal string `xml:"FilterModal,omitempty"` + ResTextFormat string `xml:"ResTextFormat,omitempty"` +} + +// SpeechRecognitionResult TODO +type SpeechRecognitionResult struct { + AudioTime float64 `xml:"AudioTime,omitempty"` + Result 
+
+// SpeechRecognitionResult TODO
+type SpeechRecognitionResult struct {
+    AudioTime float64  `xml:"AudioTime,omitempty"`
+    Result    []string `xml:"Result,omitempty"`
+}
+
+// ASRJobOperation TODO
+type ASRJobOperation struct {
+    Tag                     string                   `xml:"Tag,omitempty"`
+    Output                  *JobOutput               `xml:"Output,omitempty"`
+    SpeechRecognition       *SpeechRecognition       `xml:"SpeechRecognition,omitempty"`
+    SpeechRecognitionResult *SpeechRecognitionResult `xml:"SpeechRecognitionResult,omitempty"`
+}
+
+// CreateASRJobsOptions TODO
+type CreateASRJobsOptions struct {
+    XMLName   xml.Name         `xml:"Request"`
+    Tag       string           `xml:"Tag,omitempty"`
+    Input     *JobInput        `xml:"Input,omitempty"`
+    Operation *ASRJobOperation `xml:"Operation,omitempty"`
+    QueueId   string           `xml:"QueueId,omitempty"`
+    CallBack  string           `xml:"CallBack,omitempty"`
+}
+
+// ASRJobDetail TODO
+type ASRJobDetail struct {
+    Code         string           `xml:"Code,omitempty"`
+    Message      string           `xml:"Message,omitempty"`
+    JobId        string           `xml:"JobId,omitempty"`
+    Tag          string           `xml:"Tag,omitempty"`
+    State        string           `xml:"State,omitempty"`
+    CreationTime string           `xml:"CreationTime,omitempty"`
+    QueueId      string           `xml:"QueueId,omitempty"`
+    Input        *JobInput        `xml:"Input,omitempty"`
+    Operation    *ASRJobOperation `xml:"Operation,omitempty"`
+}
+
+// CreateASRJobsResult TODO
+type CreateASRJobsResult struct {
+    XMLName    xml.Name      `xml:"Response"`
+    JobsDetail *ASRJobDetail `xml:"JobsDetail,omitempty"`
+}
+
+// CreateASRJobs TODO
+func (s *CIService) CreateASRJobs(ctx context.Context, opt *CreateASRJobsOptions) (*CreateASRJobsResult, *Response, error) {
+    var res CreateASRJobsResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/asr_jobs",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
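+
+// A minimal usage sketch, not part of the SDK: submit a speech-recognition
+// job. The queue ID, engine parameters and object keys are placeholders; the
+// client c comes from the earlier sketch.
+//
+//    opt := &cos.CreateASRJobsOptions{
+//        Tag:   "SpeechRecognition",
+//        Input: &cos.JobInput{Object: "input/demo.mp3"},
+//        Operation: &cos.ASRJobOperation{
+//            Output: &cos.JobOutput{
+//                Region: "ap-guangzhou",
+//                Bucket: "examplebucket-1250000000",
+//                Object: "output/demo.txt",
+//            },
+//            SpeechRecognition: &cos.SpeechRecognition{
+//                EngineModelType: "16k_zh",
+//                ChannelNum:      "1",
+//                ResTextFormat:   "0",
+//            },
+//        },
+//        QueueId: "p893bcda225bf4945a378da6662e81a89",
+//    }
+//    res, _, err := c.CI.CreateASRJobs(context.Background(), opt)
+//    if err == nil {
+//        fmt.Println(res.JobsDetail.JobId)
+//    }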
+
+// DescribeMutilASRJobResult TODO
+type DescribeMutilASRJobResult struct {
+    XMLName        xml.Name       `xml:"Response"`
+    JobsDetail     []ASRJobDetail `xml:"JobsDetail,omitempty"`
+    NonExistJobIds []string       `xml:"NonExistJobIds,omitempty"`
+}
+
+// DescribeMultiASRJob TODO
+func (s *CIService) DescribeMultiASRJob(ctx context.Context, jobids []string) (*DescribeMutilASRJobResult, *Response, error) {
+    jobidsStr := ""
+    if len(jobids) < 1 {
+        return nil, nil, errors.New("empty param jobids")
+    } else {
+        jobidsStr = strings.Join(jobids, ",")
+    }
+
+    var res DescribeMutilASRJobResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/asr_jobs/" + jobidsStr,
+        method:  http.MethodGet,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// DescribeMediaTemplateOptions TODO
+type DescribeMediaTemplateOptions struct {
+    Tag        string `url:"tag,omitempty"`
+    Category   string `url:"category,omitempty"`
+    Ids        string `url:"ids,omitempty"`
+    Name       string `url:"name,omitempty"`
+    PageNumber int    `url:"pageNumber,omitempty"`
+    PageSize   int    `url:"pageSize,omitempty"`
+}
+
+// DescribeMediaTemplateResult TODO
+type DescribeMediaTemplateResult struct {
+    XMLName      xml.Name   `xml:"Response"`
+    TemplateList []Template `xml:"TemplateList,omitempty"`
+    RequestId    string     `xml:"RequestId,omitempty"`
+    TotalCount   int        `xml:"TotalCount,omitempty"`
+    PageNumber   int        `xml:"PageNumber,omitempty"`
+    PageSize     int        `xml:"PageSize,omitempty"`
+}
+
+// DescribeMediaTemplate searches templates.
+func (s *CIService) DescribeMediaTemplate(ctx context.Context, opt *DescribeMediaTemplateOptions) (*DescribeMediaTemplateResult, *Response, error) {
+    var res DescribeMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL:  s.client.BaseURL.CIURL,
+        uri:      "/template",
+        optQuery: opt,
+        method:   http.MethodGet,
+        result:   &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// DeleteMediaTemplateResult TODO
+type DeleteMediaTemplateResult struct {
+    RequestId  string `xml:"RequestId,omitempty"`
+    TemplateId string `xml:"TemplateId,omitempty"`
+}
+
+// DeleteMediaTemplate TODO
+func (s *CIService) DeleteMediaTemplate(ctx context.Context, templateId string) (*DeleteMediaTemplateResult, *Response, error) {
+    var res DeleteMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodDelete,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaSnapshotTemplateOptions TODO
+type CreateMediaSnapshotTemplateOptions struct {
+    XMLName  xml.Name  `xml:"Request"`
+    Tag      string    `xml:"Tag,omitempty"`
+    Name     string    `xml:"Name,omitempty"`
+    Snapshot *Snapshot `xml:"Snapshot,omitempty"`
+}
+
+// CreateMediaTranscodeTemplateOptions TODO
+type CreateMediaTranscodeTemplateOptions struct {
+    XMLName      xml.Name      `xml:"Request"`
+    Tag          string        `xml:"Tag,omitempty"`
+    Name         string        `xml:"Name,omitempty"`
+    Container    *Container    `xml:"Container,omitempty"`
+    Video        *Video        `xml:"Video,omitempty"`
+    Audio        *Audio        `xml:"Audio,omitempty"`
+    TimeInterval *TimeInterval `xml:"TimeInterval,omitempty"`
+    TransConfig  *TransConfig  `xml:"TransConfig,omitempty"`
+}
+
+// CreateMediaAnimationTemplateOptions TODO
+type CreateMediaAnimationTemplateOptions struct {
+    XMLName      xml.Name        `xml:"Request"`
+    Tag          string          `xml:"Tag,omitempty"`
+    Name         string          `xml:"Name,omitempty"`
+    Container    *Container      `xml:"Container,omitempty"`
+    Video        *AnimationVideo `xml:"Video,omitempty"`
+    TimeInterval *TimeInterval   `xml:"TimeInterval,omitempty"`
+}
+
+// CreateMediaConcatTemplateOptions TODO
+type CreateMediaConcatTemplateOptions struct {
+    XMLName        xml.Name        `xml:"Request"`
+    Tag            string          `xml:"Tag,omitempty"`
+    Name           string          `xml:"Name,omitempty"`
+    ConcatTemplate *ConcatTemplate `xml:"ConcatTemplate,omitempty"`
+}
+
+// CreateMediaVideoProcessTemplateOptions TODO
+type CreateMediaVideoProcessTemplateOptions struct {
+    XMLName      xml.Name      `xml:"Request"`
+    Tag          string        `xml:"Tag,omitempty"`
+    Name         string        `xml:"Name,omitempty"`
+    ColorEnhance *ColorEnhance `xml:"ColorEnhance,omitempty"`
+    MsSharpen    *MsSharpen    `xml:"MsSharpen,omitempty"`
+}
+
+// CreateMediaVideoMontageTemplateOptions TODO
+type CreateMediaVideoMontageTemplateOptions struct {
+    XMLName   xml.Name   `xml:"Request"`
+    Tag       string     `xml:"Tag,omitempty"`
+    Name      string     `xml:"Name,omitempty"`
+    Duration  string     `xml:"Duration,omitempty"`
+    Container *Container `xml:"Container,omitempty"`
+    Video     *Video     `xml:"Video,omitempty"`
+    Audio     *Audio     `xml:"Audio,omitempty"`
+    AudioMix  *AudioMix  `xml:"AudioMix,omitempty"`
+}
+
+// CreateMediaVoiceSeparateTemplateOptions TODO
+type CreateMediaVoiceSeparateTemplateOptions struct {
+    XMLName     xml.Name     `xml:"Request"`
+    Tag         string       `xml:"Tag,omitempty"`
+    Name        string       `xml:"Name,omitempty"`
+    AudioMode   string       `xml:"AudioMode,omitempty"`
+    AudioConfig *AudioConfig `xml:"AudioConfig,omitempty"`
+}
+
+// CreateMediaSuperResolutionTemplateOptions TODO
+type CreateMediaSuperResolutionTemplateOptions struct {
+    XMLName       xml.Name `xml:"Request"`
+    Tag           string   `xml:"Tag,omitempty"`
+    Name          string   `xml:"Name,omitempty"`
+    Resolution    string   `xml:"Resolution,omitempty"` // sdtohd or hdto4k
+    EnableScaleUp string   `xml:"EnableScaleUp,omitempty"`
+    Version       string   `xml:"Version,omitempty"`
+}
+
+// CreateMediaPicProcessTemplateOptions TODO
+type CreateMediaPicProcessTemplateOptions struct {
+    XMLName    xml.Name    `xml:"Request"`
+    Tag        string      `xml:"Tag,omitempty"`
+    Name       string      `xml:"Name,omitempty"`
+    PicProcess *PicProcess `xml:"PicProcess,omitempty"`
+}
`xml:"Request"` + Tag string `xml:"Tag,omitempty"` + Name string `xml:"Name,omitempty"` + PicProcess *PicProcess `xml:"PicProcess,omitempty"` +} + +// CreateMediaWatermarkTemplateOptions TODO +type CreateMediaWatermarkTemplateOptions struct { + XMLName xml.Name `xml:"Request"` + Tag string `xml:"Tag,omitempty"` + Name string `xml:"Name,omitempty"` + Watermark *Watermark `xml:"Watermark,omitempty"` +} + +// CreateMediaTemplateResult TODO +type CreateMediaTemplateResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + Template *Template `xml:"Template,omitempty"` +} + +// Template TODO +type Template struct { + TemplateId string `xml:"TemplateId,omitempty"` + Tag string `xml:"Code,omitempty"` + Name string `xml:"Name,omitempty"` + TransTpl *Transcode `xml:"TransTpl,omitempty"` + CreateTime string `xml:"CreateTime,omitempty"` + UpdateTime string `xml:"UpdateTime,omitempty"` + BucketId string `xml:"BucketId,omitempty"` + Category string `xml:"Category,omitempty"` + Snapshot *Snapshot `xml:"Snapshot,omitempty"` + Animation *Animation `xml:"Animation,omitempty"` + ConcatTemplate *ConcatTemplate `xml:"ConcatTemplate,omitempty"` + VideoProcess *VideoProcess `xml:"VideoProcess,omitempty"` + VideoMontage *VideoMontage `xml:"VideoMontage,omitempty"` + VoiceSeparate *VoiceSeparate `xml:"VoiceSeparate,omitempty"` + SuperResolution *SuperResolution `xml:"SuperResolution,omitempty"` + PicProcess *PicProcess `xml:"PicProcess,omitempty"` + Watermark *Watermark `xml:"Watermark,omitempty"` +} + +// CreateMediaSnapshotTemplate 创建截图模板 +func (s *CIService) CreateMediaSnapshotTemplate(ctx context.Context, opt *CreateMediaSnapshotTemplateOptions) (*CreateMediaTemplateResult, *Response, error) { + var res CreateMediaTemplateResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/template", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// UpdateMediaSnapshotTemplate 更新截图模板 +func (s *CIService) UpdateMediaSnapshotTemplate(ctx context.Context, opt *CreateMediaSnapshotTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) { + var res CreateMediaTemplateResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/template/" + templateId, + method: http.MethodPut, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// CreateMediaTranscodeTemplate Options 创建转码模板 +func (s *CIService) CreateMediaTranscodeTemplate(ctx context.Context, opt *CreateMediaTranscodeTemplateOptions) (*CreateMediaTemplateResult, *Response, error) { + var res CreateMediaTemplateResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/template", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// UpdateMediaTranscodeTemplate 更新转码模板 +func (s *CIService) UpdateMediaTranscodeTemplate(ctx context.Context, opt *CreateMediaTranscodeTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) { + var res CreateMediaTemplateResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/template/" + templateId, + method: http.MethodPut, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// CreateMediaAnimationTemplate 创建动图模板 +func (s *CIService) CreateMediaAnimationTemplate(ctx context.Context, opt 
+
+// CreateMediaSnapshotTemplate creates a snapshot template.
+func (s *CIService) CreateMediaSnapshotTemplate(ctx context.Context, opt *CreateMediaSnapshotTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaSnapshotTemplate updates a snapshot template.
+func (s *CIService) UpdateMediaSnapshotTemplate(ctx context.Context, opt *CreateMediaSnapshotTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaTranscodeTemplate creates a transcode template.
+func (s *CIService) CreateMediaTranscodeTemplate(ctx context.Context, opt *CreateMediaTranscodeTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaTranscodeTemplate updates a transcode template.
+func (s *CIService) UpdateMediaTranscodeTemplate(ctx context.Context, opt *CreateMediaTranscodeTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaAnimationTemplate creates an animated-image template.
+func (s *CIService) CreateMediaAnimationTemplate(ctx context.Context, opt *CreateMediaAnimationTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaAnimationTemplate updates an animated-image template.
+func (s *CIService) UpdateMediaAnimationTemplate(ctx context.Context, opt *CreateMediaAnimationTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaConcatTemplate creates a concat template.
+func (s *CIService) CreateMediaConcatTemplate(ctx context.Context, opt *CreateMediaConcatTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaConcatTemplate updates a concat template.
+func (s *CIService) UpdateMediaConcatTemplate(ctx context.Context, opt *CreateMediaConcatTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaVideoProcessTemplate creates a video-enhance template.
+func (s *CIService) CreateMediaVideoProcessTemplate(ctx context.Context, opt *CreateMediaVideoProcessTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaVideoProcessTemplate updates a video-enhance template.
+func (s *CIService) UpdateMediaVideoProcessTemplate(ctx context.Context, opt *CreateMediaVideoProcessTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaVideoMontageTemplate creates a highlights (montage) template.
+func (s *CIService) CreateMediaVideoMontageTemplate(ctx context.Context, opt *CreateMediaVideoMontageTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaVideoMontageTemplate updates a highlights (montage) template.
+func (s *CIService) UpdateMediaVideoMontageTemplate(ctx context.Context, opt *CreateMediaVideoMontageTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaVoiceSeparateTemplate creates a voice-separation template.
+func (s *CIService) CreateMediaVoiceSeparateTemplate(ctx context.Context, opt *CreateMediaVoiceSeparateTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaVoiceSeparateTemplate updates a voice-separation template.
+func (s *CIService) UpdateMediaVoiceSeparateTemplate(ctx context.Context, opt *CreateMediaVoiceSeparateTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaSuperResolutionTemplate creates a super-resolution template.
+func (s *CIService) CreateMediaSuperResolutionTemplate(ctx context.Context, opt *CreateMediaSuperResolutionTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaSuperResolutionTemplate updates a super-resolution template.
+func (s *CIService) UpdateMediaSuperResolutionTemplate(ctx context.Context, opt *CreateMediaSuperResolutionTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaPicProcessTemplate creates an image-processing template.
+func (s *CIService) CreateMediaPicProcessTemplate(ctx context.Context, opt *CreateMediaPicProcessTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaPicProcessTemplate updates an image-processing template.
+func (s *CIService) UpdateMediaPicProcessTemplate(ctx context.Context, opt *CreateMediaPicProcessTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template/" + templateId,
+        method:  http.MethodPut,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// CreateMediaWatermarkTemplate creates a watermark template.
+func (s *CIService) CreateMediaWatermarkTemplate(ctx context.Context, opt *CreateMediaWatermarkTemplateOptions) (*CreateMediaTemplateResult, *Response, error) {
+    var res CreateMediaTemplateResult
+    sendOpt := sendOptions{
+        baseURL: s.client.BaseURL.CIURL,
+        uri:     "/template",
+        method:  http.MethodPost,
+        body:    opt,
+        result:  &res,
+    }
+    resp, err := s.client.send(ctx, &sendOpt)
+    return &res, resp, err
+}
+
+// UpdateMediaWatermarkTemplate updates a watermark template.
+func (s *CIService)
UpdateMediaWatermarkTemplate(ctx context.Context, opt *CreateMediaWatermarkTemplateOptions, templateId string) (*CreateMediaTemplateResult, *Response, error) { + var res CreateMediaTemplateResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/template/" + templateId, + method: http.MethodPut, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// MediaWorkflow TODO +type MediaWorkflow struct { + Name string `xml:"Name,omitempty"` + WorkflowId string `xml:"WorkflowId,omitempty"` + State string `xml:"State,omitempty"` + Topology *Topology `xml:"Topology,omitempty"` + CreateTime string `xml:"CreateTime,omitempty"` + UpdateTime string `xml:"UpdateTime,omitempty"` + BucketId string `xml:"BucketId,omitempty"` +} + +// MarshalXML TODO +func (m *CreateMediaWorkflowOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if m == nil { + return nil + } + type xmlMapEntry struct { + XMLName xml.Name + Type string `xml:"Type"` + Input interface{} `xml:",innerxml"` + Operation interface{} `xml:",innerxml"` + } + tokens := []xml.Token{} + tokens = append(tokens, xml.StartElement{Name: xml.Name{Local: "Request"}}) + tokens = append(tokens, xml.StartElement{Name: xml.Name{Local: "MediaWorkflow"}}) + t := xml.StartElement{Name: xml.Name{Local: "Name"}} + tokens = append(tokens, t, xml.CharData(m.MediaWorkflow.Name), xml.EndElement{Name: t.Name}) + t = xml.StartElement{Name: xml.Name{Local: "State"}} + tokens = append(tokens, t, xml.CharData(m.MediaWorkflow.State), xml.EndElement{Name: t.Name}) + tokens = append(tokens, xml.StartElement{Name: xml.Name{Local: "Topology"}}) + tokens = append(tokens, xml.StartElement{Name: xml.Name{Local: "Dependencies"}}) + for key, value := range m.MediaWorkflow.Topology.Dependencies { + t := xml.StartElement{Name: xml.Name{Local: key}} + tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) + } + tokens = append(tokens, xml.EndElement{Name: xml.Name{Local: "Dependencies"}}) + // Nodes + tokens = append(tokens, xml.StartElement{Name: xml.Name{Local: "Nodes"}}) + for _, t := range tokens { + err := e.EncodeToken(t) + if err != nil { + return err + } + } + tokens = tokens[:0] + for k, v := range m.MediaWorkflow.Topology.Nodes { + e.Encode(xmlMapEntry{XMLName: xml.Name{Local: k}, Type: v.Type, Input: v.Input, Operation: v.Operation}) + } + tokens = append(tokens, xml.EndElement{Name: xml.Name{Local: "Nodes"}}) + tokens = append(tokens, xml.EndElement{Name: xml.Name{Local: "Topology"}}) + tokens = append(tokens, xml.EndElement{Name: xml.Name{Local: "MediaWorkflow"}}) + tokens = append(tokens, xml.EndElement{Name: xml.Name{Local: "Request"}}) + for _, t := range tokens { + err := e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// UnmarshalXML TODO +func (m *CreateMediaWorkflowOptions) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var v struct { + XMLName xml.Name //`xml:"Topology"` + Dependencies struct { + Inner []byte `xml:",innerxml"` + } `xml:"Dependencies"` + Nodes struct { + Inner []byte `xml:",innerxml"` + } `xml:"Nodes"` + } + err := d.DecodeElement(&v, &start) + if err != nil { + return err + } + + myMap := make(map[string]interface{}) + + // ... do the mxj magic here ... 
-
+
+	temp := v.Nodes.Inner
+
+	prefix := "<Nodes>"
+	postfix := "</Nodes>"
+	str := prefix + string(temp) + postfix
+	myMxjMap, _ := mxj.NewMapXml([]byte(str))
+	myMap, _ = myMxjMap["Nodes"].(map[string]interface{})
+	nodesMap := make(map[string]Node)
+
+	for k, v := range myMap {
+		var node Node
+		mapstructure.Decode(v, &node)
+		nodesMap[k] = node
+	}
+
+	// fill myMap
+	m.MediaWorkflow.Topology.Nodes = nodesMap
+
+	deps := make(map[string]interface{})
+
+	tep := "<Dependencies>" + string(v.Dependencies.Inner) + "</Dependencies>"
+	tepMxjMap, _ := mxj.NewMapXml([]byte(tep))
+	deps, _ = tepMxjMap["Dependencies"].(map[string]interface{})
+	depsString := make(map[string]string)
+	for k, v := range deps {
+		depsString[k] = v.(string)
+	}
+	m.MediaWorkflow.Topology.Dependencies = depsString
+	return nil
+}
+
+// CreateMediaWorkflowOptions TODO
+type CreateMediaWorkflowOptions struct {
+	XMLName       xml.Name       `xml:"Request"`
+	MediaWorkflow *MediaWorkflow `xml:"MediaWorkflow,omitempty"`
+}
+
+// CreateMediaWorkflowResult TODO
+type CreateMediaWorkflowResult struct {
+	XMLName       xml.Name       `xml:"Response"`
+	RequestId     string         `xml:"RequestId,omitempty"`
+	MediaWorkflow *MediaWorkflow `xml:"MediaWorkflow,omitempty"`
+}
+
+// CreateMediaWorkflow creates a media workflow
+func (s *CIService) CreateMediaWorkflow(ctx context.Context, opt *CreateMediaWorkflowOptions) (*CreateMediaWorkflowResult, *Response, error) {
+	var res CreateMediaWorkflowResult
+	sendOpt := sendOptions{
+		baseURL: s.client.BaseURL.CIURL,
+		uri:     "/workflow",
+		method:  http.MethodPost,
+		body:    opt,
+		result:  &res,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	return &res, resp, err
+}
+
+// UpdateMediaWorkflow updates a media workflow
+func (s *CIService) UpdateMediaWorkflow(ctx context.Context, opt *CreateMediaWorkflowOptions, workflowId string) (*CreateMediaWorkflowResult, *Response, error) {
+	var res CreateMediaWorkflowResult
+	sendOpt := sendOptions{
+		baseURL: s.client.BaseURL.CIURL,
+		uri:     "/workflow/" + workflowId,
+		method:  http.MethodPut,
+		body:    opt,
+		result:  &res,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	return &res, resp, err
+}
+
+// DescribeMediaWorkflowOptions TODO
+type DescribeMediaWorkflowOptions struct {
+	Ids        string `url:"ids,omitempty"`
+	Name       string `url:"name,omitempty"`
+	PageNumber int    `url:"pageNumber,omitempty"`
+	PageSize   int    `url:"pageSize,omitempty"`
+}
+
+// DescribeMediaWorkflowResult TODO
+type DescribeMediaWorkflowResult struct {
+	XMLName           xml.Name        `xml:"Response"`
+	MediaWorkflowList []MediaWorkflow `xml:"MediaWorkflowList,omitempty"`
+	RequestId         string          `xml:"RequestId,omitempty"`
+	TotalCount        int             `xml:"TotalCount,omitempty"`
+	PageNumber        int             `xml:"PageNumber,omitempty"`
+	PageSize          int             `xml:"PageSize,omitempty"`
+	NonExistIDs       []string        `xml:"NonExistIDs,omitempty"`
+}
+
+// DescribeMediaWorkflow searches media workflows
+func (s *CIService) DescribeMediaWorkflow(ctx context.Context, opt *DescribeMediaWorkflowOptions) (*DescribeMediaWorkflowResult, *Response, error) {
+	var res DescribeMediaWorkflowResult
+	sendOpt := sendOptions{
+		baseURL:  s.client.BaseURL.CIURL,
+		uri:      "/Workflow",
+		optQuery: opt,
+		method:   http.MethodGet,
+		result:   &res,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	return &res, resp, err
+}
+
+// DeleteMediaWorkflowResult TODO
+type DeleteMediaWorkflowResult struct {
+	RequestId  string `xml:"RequestId,omitempty"`
+	WorkflowId string `xml:"WorkflowId,omitempty"`
+}
+
+// DeleteMediaWorkflow deletes a media workflow
+func (s *CIService) DeleteMediaWorkflow(ctx context.Context, workflowId string) (*DeleteMediaWorkflowResult, *Response, error) {
+	var res DeleteMediaWorkflowResult
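+	// DELETE /Workflow/<workflowId> removes the workflow; the response
+	// carries the RequestId and the Id of the deleted workflow.
+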
sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/Workflow/" + workflowId, + method: http.MethodDelete, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// InventoryTriggerJobInput TODO +type InventoryTriggerJobInput struct { + Manifest string `xml:"Manifest,omitempty"` + UrlFile string `xml:"UrlFile,omitempty"` + Prefix string `xml:"Prefix,omitempty"` + Object string `xml:"Object,omitempty"` +} + +// InventoryTriggerJobOperationTimeInterval TODO +type InventoryTriggerJobOperationTimeInterval struct { + Start string `xml:"Start,omitempty"` + End string `xml:"End,omitempty"` +} + +// InventoryTriggerJobOperation TODO +type InventoryTriggerJobOperation struct { + WorkflowIds string `xml:"WorkflowIds,omitempty"` + TimeInterval InventoryTriggerJobOperationTimeInterval `xml:"TimeInterval,omitempty"` +} + +// InventoryTriggerJob TODO +type InventoryTriggerJob struct { + Name string `xml:"Name,omitempty"` + Input *InventoryTriggerJobInput `xml:"Input,omitempty"` + Operation *InventoryTriggerJobOperation `xml:"Operation,omitempty"` +} + +// CreateInventoryTriggerJobOptions TODO +type CreateInventoryTriggerJobOptions struct { + XMLName xml.Name `xml:"Request"` + Name string `xml:"Name,omitempty"` + Input *InventoryTriggerJobInput `xml:"Input,omitempty"` + Operation *InventoryTriggerJobOperation `xml:"Operation,omitempty"` +} + +// InventoryTriggerJobDetail TODO +type InventoryTriggerJobDetail struct { + Code string `xml:"Code,omitempty"` + Message string `xml:"Message,omitempty"` + JobId string `xml:"JobId,omitempty"` + Tag string `xml:"Tag,omitempty"` + Progress string `xml:"Progress,omitempty"` + State string `xml:"State,omitempty"` + CreationTime string `xml:"CreationTime,omitempty"` + StartTime string `xml:"StartTime,omitempty"` + EndTime string `xml:"EndTime,omitempty"` + QueueId string `xml:"QueueId,omitempty"` + Input *InventoryTriggerJobInput `xml:"Input,omitempty"` + Operation *InventoryTriggerJobOperation `xml:"Operation,omitempty"` +} + +// CreateInventoryTriggerJobResult TODO +type CreateInventoryTriggerJobResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + JobsDetail *InventoryTriggerJobDetail `xml:"JobsDetail,omitempty"` +} + +// CreateInventoryTriggerJob TODO +func (s *CIService) CreateInventoryTriggerJob(ctx context.Context, opt *CreateInventoryTriggerJobOptions) (*CreateInventoryTriggerJobResult, *Response, error) { + var res CreateInventoryTriggerJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/inventorytriggerjob", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribeInventoryTriggerJobResult TODO +type DescribeInventoryTriggerJobResult struct { + XMLName xml.Name `xml:"Response"` + RequestId string `xml:"RequestId,omitempty"` + JobsDetail *InventoryTriggerJobDetail `xml:"JobsDetail,omitempty"` + NonExistJobId string `xml:"NonExistJobId,omitempty"` +} + +// DescribeInventoryTriggerJob 查询指定存量触发工作流的任务 +func (s *CIService) DescribeInventoryTriggerJob(ctx context.Context, jobId string) (*DescribeInventoryTriggerJobResult, *Response, error) { + var res DescribeInventoryTriggerJobResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.CIURL, + uri: "/inventorytriggerjob/" + jobId, + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} + +// DescribeInventoryTriggerJobsOptions TODO +type 
DescribeInventoryTriggerJobsOptions struct {
+	NextToken         string `url:"nextToken,omitempty"`
+	Size              string `url:"size,omitempty"`
+	OrderByTime       string `url:"orderByTime,omitempty"`
+	States            string `url:"states,omitempty"`
+	StartCreationTime string `url:"startCreationTime,omitempty"`
+	EndCreationTime   string `url:"endCreationTime,omitempty"`
+	WorkflowId        string `url:"workflowId,omitempty"`
+	JobId             string `url:"jobId,omitempty"`
+	Name              string `url:"name,omitempty"`
+}
+
+// DescribeInventoryTriggerJobsResult TODO
+type DescribeInventoryTriggerJobsResult struct {
+	XMLName    xml.Name                   `xml:"Response"`
+	RequestId  string                     `xml:"RequestId,omitempty"`
+	JobsDetail *InventoryTriggerJobDetail `xml:"JobsDetail,omitempty"`
+	NextToken  string                     `xml:"NextToken,omitempty"`
+}
+
+// DescribeInventoryTriggerJobs lists the jobs of inventory-triggered workflows
+func (s *CIService) DescribeInventoryTriggerJobs(ctx context.Context, opt *DescribeInventoryTriggerJobsOptions) (*DescribeInventoryTriggerJobsResult, *Response, error) {
+	var res DescribeInventoryTriggerJobsResult
+	sendOpt := sendOptions{
+		baseURL:  s.client.BaseURL.CIURL,
+		uri:      "/inventorytriggerjob",
+		optQuery: opt,
+		method:   http.MethodGet,
+		result:   &res,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	return &res, resp, err
+}
+
+// CancelInventoryTriggerJob TODO
+func (s *CIService) CancelInventoryTriggerJob(ctx context.Context, jobId string) (*Response, error) {
+	sendOpt := sendOptions{
+		baseURL: s.client.BaseURL.CIURL,
+		uri:     "/inventorytriggerjob/" + jobId,
+		method:  http.MethodPut,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	return resp, err
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/cos.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/cos.go
new file mode 100644
index 0000000..382fae1
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/cos.go
@@ -0,0 +1,563 @@
+package cos
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"text/template"
+	"time"
+
+	"regexp"
+	"strconv"
+
+	"github.com/google/go-querystring/query"
+	"github.com/mozillazg/go-httpheader"
+)
+
+const (
+	// Version current go sdk version
+	Version               = "0.7.36"
+	UserAgent             = "cos-go-sdk-v5/" + Version
+	contentTypeXML        = "application/xml"
+	defaultServiceBaseURL = "http://service.cos.myqcloud.com"
+)
+
+var (
+	bucketURLTemplate = template.Must(
+		template.New("bucketURLFormat").Parse(
+			"{{.Schema}}://{{.BucketName}}.cos.{{.Region}}.myqcloud.com",
+		),
+	)
+
+	// {<http://>|<https://>}{bucketname-appid}.{cos|cos-internal|cos-website|ci}.{region}.{myqcloud.com/tencentcos.cn}{/}
+	hostSuffix       = regexp.MustCompile(`^.*((cos|cos-internal|cos-website|ci)\.[a-z-1]+|file)\.(myqcloud\.com|tencentcos\.cn).*$`)
+	hostPrefix       = regexp.MustCompile(`^(http://|https://){0,1}([a-z0-9-]+-[0-9]+\.){0,1}((cos|cos-internal|cos-website|ci)\.[a-z-1]+|file)\.(myqcloud\.com|tencentcos\.cn).*$`)
+	invalidBucketErr = fmt.Errorf("invalid bucket format, please check your cos.BaseURL")
+)
+
+// BaseURL is the set of base URLs needed to reach each group of APIs
+type BaseURL struct {
+	// base URL (without the path part) for bucket and object APIs: http://example.com
+	BucketURL *url.URL
+	// base URL (without the path part) for the service API: http://example.com
+	ServiceURL *url.URL
+	// base URL (without the path part) for the batch job API: http://example.com
+	BatchURL *url.URL
+	// base URL for CI APIs
+	CIURL *url.URL
+	// base URL for the Fetch Task API
+	FetchURL *url.URL
+}
+
+// NewBucketURL builds the BucketURL needed by BaseURL
+//
+// bucketName: the bucket name; buckets are named {name}-{appid}, and the name passed here must follow that format
+// Region: region code:
ap-beijing-1,ap-beijing,ap-shanghai,ap-guangzhou... +// secure: 是否使用 https +func NewBucketURL(bucketName, region string, secure bool) (*url.URL, error) { + schema := "https" + if !secure { + schema = "http" + } + + if region == "" { + return nil, fmt.Errorf("region[%v] is invalid", region) + } + if bucketName == "" || !strings.ContainsAny(bucketName, "-") { + return nil, fmt.Errorf("bucketName[%v] is invalid", bucketName) + } + w := bytes.NewBuffer(nil) + bucketURLTemplate.Execute(w, struct { + Schema string + BucketName string + Region string + }{ + schema, bucketName, region, + }) + + u, _ := url.Parse(w.String()) + return u, nil +} + +type RetryOptions struct { + Count int + Interval time.Duration + StatusCode []int +} +type Config struct { + EnableCRC bool + RequestBodyClose bool + RetryOpt RetryOptions +} + +// Client is a client manages communication with the COS API. +type Client struct { + client *http.Client + + Host string + UserAgent string + BaseURL *BaseURL + + common service + + Service *ServiceService + Bucket *BucketService + Object *ObjectService + Batch *BatchService + CI *CIService + + Conf *Config +} + +type service struct { + client *Client +} + +// NewClient returns a new COS API client. +func NewClient(uri *BaseURL, httpClient *http.Client) *Client { + if httpClient == nil { + httpClient = &http.Client{} + } + + baseURL := &BaseURL{} + if uri != nil { + baseURL.BucketURL = uri.BucketURL + baseURL.ServiceURL = uri.ServiceURL + baseURL.BatchURL = uri.BatchURL + baseURL.CIURL = uri.CIURL + baseURL.FetchURL = uri.FetchURL + } + if baseURL.ServiceURL == nil { + baseURL.ServiceURL, _ = url.Parse(defaultServiceBaseURL) + } + + c := &Client{ + client: httpClient, + UserAgent: UserAgent, + BaseURL: baseURL, + Conf: &Config{ + EnableCRC: true, + RequestBodyClose: false, + RetryOpt: RetryOptions{ + Count: 3, + Interval: time.Duration(0), + }, + }, + } + c.common.client = c + c.Service = (*ServiceService)(&c.common) + c.Bucket = (*BucketService)(&c.common) + c.Object = (*ObjectService)(&c.common) + c.Batch = (*BatchService)(&c.common) + c.CI = (*CIService)(&c.common) + return c +} + +type Credential struct { + SecretID string + SecretKey string + SessionToken string +} + +func (c *Client) GetCredential() *Credential { + if auth, ok := c.client.Transport.(*AuthorizationTransport); ok { + auth.rwLocker.Lock() + defer auth.rwLocker.Unlock() + return &Credential{ + SecretID: auth.SecretID, + SecretKey: auth.SecretKey, + SessionToken: auth.SessionToken, + } + } + if auth, ok := c.client.Transport.(*CVMCredentialTransport); ok { + ak, sk, token, err := auth.GetCredential() + if err != nil { + return nil + } + return &Credential{ + SecretID: ak, + SecretKey: sk, + SessionToken: token, + } + } + if auth, ok := c.client.Transport.(*CredentialTransport); ok { + ak, sk, token := auth.Credential.GetSecretId(), auth.Credential.GetSecretKey(), auth.Credential.GetToken() + return &Credential{ + SecretID: ak, + SecretKey: sk, + SessionToken: token, + } + } + return nil +} + +func (c *Client) newRequest(ctx context.Context, baseURL *url.URL, uri, method string, body interface{}, optQuery interface{}, optHeader interface{}) (req *http.Request, err error) { + if !checkURL(baseURL) { + return nil, invalidBucketErr + } + uri, err = addURLOptions(uri, optQuery) + if err != nil { + return + } + u, _ := url.Parse(uri) + urlStr := baseURL.ResolveReference(u).String() + + var reader io.Reader + contentType := "" + contentMD5 := "" + if body != nil { + // 上传文件 + if r, ok := body.(io.Reader); ok { + reader 
= r + } else { + b, err := xml.Marshal(body) + if err != nil { + return nil, err + } + contentType = contentTypeXML + reader = bytes.NewReader(b) + contentMD5 = base64.StdEncoding.EncodeToString(calMD5Digest(b)) + } + } + + req, err = http.NewRequest(method, urlStr, reader) + if err != nil { + return + } + + req.Header, err = addHeaderOptions(req.Header, optHeader) + if err != nil { + return + } + if v := req.Header.Get("Content-Length"); req.ContentLength == 0 && v != "" && v != "0" { + req.ContentLength, _ = strconv.ParseInt(v, 10, 64) + } + + if contentMD5 != "" { + req.Header["Content-MD5"] = []string{contentMD5} + } + if v := req.Header.Get("User-Agent"); v == "" || !strings.HasPrefix(v, UserAgent) { + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } + } + if req.Header.Get("Content-Type") == "" && contentType != "" { + req.Header.Set("Content-Type", contentType) + } + if c.Host != "" { + req.Host = c.Host + } + if c.Conf.RequestBodyClose { + req.Close = true + } + return +} + +func (c *Client) doAPI(ctx context.Context, req *http.Request, result interface{}, closeBody bool) (*Response, error) { + var cancel context.CancelFunc + if closeBody { + ctx, cancel = context.WithCancel(ctx) + defer cancel() + } + req = req.WithContext(ctx) + + resp, err := c.client.Do(req) + if err != nil { + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + return nil, err + } + + defer func() { + if closeBody { + // Close the body to let the Transport reuse the connection + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + }() + + response := newResponse(resp) + + err = checkResponse(resp) + if err != nil { + // StatusCode != 2xx when Get Object + if !closeBody { + resp.Body.Close() + } + // even though there was an error, we still return the response + // in case the caller wants to inspect it further + return response, err + } + + // need CRC64 verification + if reader, ok := req.Body.(*teeReader); ok { + if c.Conf.EnableCRC && reader.writer != nil && !reader.disableCheckSum { + localcrc := reader.Crc64() + scoscrc := response.Header.Get("x-cos-hash-crc64ecma") + icoscrc, err := strconv.ParseUint(scoscrc, 10, 64) + if icoscrc != localcrc { + return response, fmt.Errorf("verification failed, want:%v, return:%v, x-cos-hash-crc64ecma:%v, err:%v", localcrc, icoscrc, scoscrc, err) + } + } + } + + if result != nil { + if w, ok := result.(io.Writer); ok { + io.Copy(w, resp.Body) + } else { + err = xml.NewDecoder(resp.Body).Decode(result) + if err == io.EOF { + err = nil // ignore EOF errors caused by empty response body + } + } + } + + return response, err +} + +type sendOptions struct { + // 基础 URL + baseURL *url.URL + // URL 中除基础 URL 外的剩余部分 + uri string + // 请求方法 + method string + + body interface{} + // url 查询参数 + optQuery interface{} + // http header 参数 + optHeader interface{} + // 用 result 反序列化 resp.Body + result interface{} + // 是否禁用自动调用 resp.Body.Close() + // 自动调用 Close() 是为了能够重用连接 + disableCloseBody bool +} + +func (c *Client) doRetry(ctx context.Context, opt *sendOptions) (resp *Response, err error) { + if opt.body != nil { + if _, ok := opt.body.(io.Reader); ok { + resp, err = c.send(ctx, opt) + return + } + } + count := 1 + if count < c.Conf.RetryOpt.Count { + count = c.Conf.RetryOpt.Count + } + nr := 0 + interval := c.Conf.RetryOpt.Interval + for nr < count { + resp, err = c.send(ctx, opt) + if err != nil && err != invalidBucketErr { + if 
resp != nil && resp.StatusCode <= 499 { + dobreak := true + for _, v := range c.Conf.RetryOpt.StatusCode { + if resp.StatusCode == v { + dobreak = false + break + } + } + if dobreak { + break + } + } + nr++ + if interval > 0 && nr < count { + time.Sleep(interval) + } + continue + } + break + } + return + +} +func (c *Client) send(ctx context.Context, opt *sendOptions) (resp *Response, err error) { + req, err := c.newRequest(ctx, opt.baseURL, opt.uri, opt.method, opt.body, opt.optQuery, opt.optHeader) + if err != nil { + return + } + + resp, err = c.doAPI(ctx, req, opt.result, !opt.disableCloseBody) + return +} + +// addURLOptions adds the parameters in opt as URL query parameters to s. opt +// must be a struct whose fields may contain "url" tags. +func addURLOptions(s string, opt interface{}) (string, error) { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + u, err := url.Parse(s) + if err != nil { + return s, err + } + + qs, err := query.Values(opt) + if err != nil { + return s, err + } + + // 保留原有的参数,并且放在前面。因为 cos 的 url 路由是以第一个参数作为路由的 + // e.g. /?uploads + q := u.RawQuery + rq := qs.Encode() + if q != "" { + if rq != "" { + u.RawQuery = fmt.Sprintf("%s&%s", q, qs.Encode()) + } + } else { + u.RawQuery = rq + } + return u.String(), nil +} + +// addHeaderOptions adds the parameters in opt as Header fields to req. opt +// must be a struct whose fields may contain "header" tags. +func addHeaderOptions(header http.Header, opt interface{}) (http.Header, error) { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Ptr && v.IsNil() { + return header, nil + } + + h, err := httpheader.Header(opt) + if err != nil { + return nil, err + } + + for key, values := range h { + for _, value := range values { + header.Add(key, value) + } + } + return header, nil +} + +func checkURL(baseURL *url.URL) bool { + host := baseURL.String() + if hostSuffix.MatchString(host) && !hostPrefix.MatchString(host) { + return false + } + return true +} + +// Owner defines Bucket/Object's owner +type Owner struct { + UIN string `xml:"uin,omitempty"` + ID string `xml:",omitempty"` + DisplayName string `xml:",omitempty"` +} + +// Initiator same to the Owner struct +type Initiator Owner + +// Response API 响应 +type Response struct { + *http.Response +} + +func newResponse(resp *http.Response) *Response { + return &Response{ + Response: resp, + } +} + +// ACLHeaderOptions is the option of ACLHeader +type ACLHeaderOptions struct { + XCosACL string `header:"x-cos-acl,omitempty" url:"-" xml:"-"` + XCosGrantRead string `header:"x-cos-grant-read,omitempty" url:"-" xml:"-"` + XCosGrantWrite string `header:"x-cos-grant-write,omitempty" url:"-" xml:"-"` + XCosGrantFullControl string `header:"x-cos-grant-full-control,omitempty" url:"-" xml:"-"` + XCosGrantReadACP string `header:"x-cos-grant-read-acp,omitempty" url:"-" xml:"-"` + XCosGrantWriteACP string `header:"x-cos-grant-write-acp,omitempty" url:"-" xml:"-"` +} + +// ACLGrantee is the param of ACLGrant +type ACLGrantee struct { + Type string `xml:"type,attr"` + UIN string `xml:"uin,omitempty"` + URI string `xml:"URI,omitempty"` + ID string `xml:",omitempty"` + DisplayName string `xml:",omitempty"` + SubAccount string `xml:"Subaccount,omitempty"` +} + +// ACLGrant is the param of ACLXml +type ACLGrant struct { + Grantee *ACLGrantee + Permission string +} + +// ACLXml is the ACL body struct +type ACLXml struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner *Owner + AccessControlList []ACLGrant 
`xml:"AccessControlList>Grant,omitempty"` +} + +func decodeACL(resp *Response, res *ACLXml) { + ItemMap := map[string]string{ + "ACL": "x-cos-acl", + "READ": "x-cos-grant-read", + "WRITE": "x-cos-grant-write", + "READ_ACP": "x-cos-grant-read-acp", + "WRITE_ACP": "x-cos-grant-write-acp", + "FULL_CONTROL": "x-cos-grant-full-control", + } + publicACL := make(map[string]int) + resACL := make(map[string][]string) + for _, item := range res.AccessControlList { + if item.Grantee == nil { + continue + } + if item.Grantee.ID == "qcs::cam::anyone:anyone" || item.Grantee.URI == "http://cam.qcloud.com/groups/global/AllUsers" { + publicACL[item.Permission] = 1 + } else if item.Grantee.ID != res.Owner.ID { + resACL[item.Permission] = append(resACL[item.Permission], "id=\""+item.Grantee.ID+"\"") + } + } + if publicACL["FULL_CONTROL"] == 1 || (publicACL["READ"] == 1 && publicACL["WRITE"] == 1) { + resACL["ACL"] = []string{"public-read-write"} + } else if publicACL["READ"] == 1 { + resACL["ACL"] = []string{"public-read"} + } else { + resACL["ACL"] = []string{"private"} + } + + for item, header := range ItemMap { + if len(resp.Header.Get(header)) > 0 || len(resACL[item]) == 0 { + continue + } + resp.Header.Set(header, uniqueGrantID(resACL[item])) + } +} + +func uniqueGrantID(grantIDs []string) string { + res := []string{} + filter := make(map[string]int) + for _, id := range grantIDs { + if filter[id] != 0 { + continue + } + filter[id] = 1 + res = append(res, id) + } + return strings.Join(res, ",") +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/doc.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/doc.go new file mode 100644 index 0000000..99005ab --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/doc.go @@ -0,0 +1,3 @@ +// Package cos is COS(Cloud Object Storage) Go SDK. The V5 version(XML API). +// There are examples of using each API in the project's 'example' directory. 
+package cos diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/error.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/error.go new file mode 100644 index 0000000..8f0a7c8 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/error.go @@ -0,0 +1,96 @@ +package cos + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strconv" + "strings" +) + +// ErrorResponse 包含 API 返回的错误信息 +// +// https://www.qcloud.com/document/product/436/7730 +type ErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Response *http.Response `xml:"-"` + Code string + Message string + Resource string + RequestID string `header:"x-cos-request-id,omitempty" url:"-" xml:"RequestId,omitempty"` + TraceID string `xml:"TraceId,omitempty"` +} + +// Error returns the error msg +func (r *ErrorResponse) Error() string { + RequestID := r.RequestID + if RequestID == "" { + RequestID = r.Response.Header.Get("X-Cos-Request-Id") + } + TraceID := r.TraceID + if TraceID == "" { + TraceID = r.Response.Header.Get("X-Cos-Trace-Id") + } + decodeURL, err := decodeURIComponent(r.Response.Request.URL.String()) + if err != nil { + decodeURL = r.Response.Request.URL.String() + } + return fmt.Sprintf("%v %v: %d %v(Message: %v, RequestId: %v, TraceId: %v)", + r.Response.Request.Method, decodeURL, + r.Response.StatusCode, r.Code, r.Message, RequestID, TraceID) +} + +type jsonError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` + RequestID string `json:"request_id,omitempty"` +} + +// 检查 response 是否是出错时的返回的 response +func checkResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 { + return nil + } + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + if err == nil && data != nil { + xml.Unmarshal(data, errorResponse) + } + // 是否为 json 格式 + if errorResponse.Code == "" { + ctype := strings.TrimLeft(r.Header.Get("Content-Type"), " ") + if strings.HasPrefix(ctype, "application/json") { + var jerror jsonError + json.Unmarshal(data, &jerror) + errorResponse.Code = strconv.Itoa(jerror.Code) + errorResponse.Message = jerror.Message + errorResponse.RequestID = jerror.RequestID + } + + } + return errorResponse +} + +func IsNotFoundError(e error) bool { + if e == nil { + return false + } + err, ok := e.(*ErrorResponse) + if !ok { + return false + } + if err.Response != nil && err.Response.StatusCode == 404 { + return true + } + return false +} + +func IsCOSError(e error) (*ErrorResponse, bool) { + if e == nil { + return nil, false + } + err, ok := e.(*ErrorResponse) + return err, ok +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/helper.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/helper.go new file mode 100644 index 0000000..7af895c --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/helper.go @@ -0,0 +1,390 @@ +package cos + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "errors" + "fmt" + "github.com/mozillazg/go-httpheader" + "hash/crc64" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" +) + +// 单次上传文件最大为5GB +const singleUploadMaxLength = 5 * 1024 * 1024 * 1024 +const singleUploadThreshold = 32 * 1024 * 1024 + +// 计算 md5 或 sha1 时的分块大小 +const calDigestBlockSize = 1024 * 1024 * 10 + +func calMD5Digest(msg []byte) []byte { + // TODO: 分块计算,减少内存消耗 + m := md5.New() + m.Write(msg) + return m.Sum(nil) +} + +func calSHA1Digest(msg []byte) []byte { + // TODO: 分块计算,减少内存消耗 + m := sha1.New() + m.Write(msg) + return m.Sum(nil) +} + +func calCRC64(fd io.Reader) (uint64, error) { + 
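// stream the whole reader through a CRC-64 hash using the ECMA polynomial;
+	// COS reports the same checksum in the x-cos-hash-crc64ecma response header
+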
tb := crc64.MakeTable(crc64.ECMA) + hash := crc64.New(tb) + _, err := io.Copy(hash, fd) + if err != nil { + return 0, err + } + sum := hash.Sum64() + return sum, nil +} + +// cloneRequest returns a clone of the provided *http.Request. The clone is a +// shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +// encodeURIComponent like same function in javascript +// +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent +// +// http://www.ecma-international.org/ecma-262/6.0/#sec-uri-syntax-and-semantics +func encodeURIComponent(s string, excluded ...[]byte) string { + var b bytes.Buffer + written := 0 + + for i, n := 0, len(s); i < n; i++ { + c := s[i] + + switch c { + case '-', '_', '.', '!', '~', '*', '\'', '(', ')': + continue + default: + // Unreserved according to RFC 3986 sec 2.3 + if 'a' <= c && c <= 'z' { + + continue + + } + if 'A' <= c && c <= 'Z' { + + continue + + } + if '0' <= c && c <= '9' { + + continue + } + if len(excluded) > 0 { + conti := false + for _, ch := range excluded[0] { + if ch == c { + conti = true + break + } + } + if conti { + continue + } + } + } + + b.WriteString(s[written:i]) + fmt.Fprintf(&b, "%%%02X", c) + written = i + 1 + } + + if written == 0 { + return s + } + b.WriteString(s[written:]) + return b.String() +} + +func decodeURIComponent(s string) (string, error) { + decodeStr, err := url.QueryUnescape(s) + if err != nil { + return s, err + } + return decodeStr, err +} + +func DecodeURIComponent(s string) (string, error) { + return decodeURIComponent(s) +} + +func EncodeURIComponent(s string) string { + return encodeURIComponent(s) +} + +func GetReaderLen(reader io.Reader) (length int64, err error) { + switch v := reader.(type) { + case *bytes.Buffer: + length = int64(v.Len()) + case *bytes.Reader: + length = int64(v.Len()) + case *strings.Reader: + length = int64(v.Len()) + case *os.File: + stat, ferr := v.Stat() + if ferr != nil { + err = fmt.Errorf("can't get reader length: %s", ferr.Error()) + } else { + length = stat.Size() + } + case *io.LimitedReader: + length = int64(v.N) + case *LimitedReadCloser: + length = int64(v.N) + case FixedLengthReader: + length = v.Size() + default: + err = fmt.Errorf("can't get reader content length, unkown reader type") + } + return +} + +func IsLenReader(reader io.Reader) bool { + switch reader.(type) { + case *bytes.Buffer: + return true + case *bytes.Reader: + return true + case *strings.Reader: + return true + default: + return false + } + return false +} + +func CheckReaderLen(reader io.Reader) error { + nlen, err := GetReaderLen(reader) + if err != nil || nlen < singleUploadMaxLength { + return nil + } + return errors.New("The single object size you upload can not be larger than 5GB") +} + +func cloneHeader(opt *http.Header) *http.Header { + if opt == nil { + return nil + } + h := make(http.Header, len(*opt)) + for k, vv := range *opt { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h[k] = vv2 + } + return &h +} + +func CopyOptionsToMulti(opt *ObjectCopyOptions) *InitiateMultipartUploadOptions { + if opt == nil { + return nil + } + optini := &InitiateMultipartUploadOptions{ + opt.ACLHeaderOptions, + &ObjectPutHeaderOptions{}, + } + if opt.ObjectCopyHeaderOptions == nil { + 
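// no copy headers were supplied, so the empty defaults initialized above are returned
+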
return optini + } + optini.ObjectPutHeaderOptions = &ObjectPutHeaderOptions{ + CacheControl: opt.ObjectCopyHeaderOptions.CacheControl, + ContentDisposition: opt.ObjectCopyHeaderOptions.ContentDisposition, + ContentEncoding: opt.ObjectCopyHeaderOptions.ContentEncoding, + ContentType: opt.ObjectCopyHeaderOptions.ContentType, + ContentLanguage: opt.ObjectCopyHeaderOptions.ContentLanguage, + Expect: opt.ObjectCopyHeaderOptions.Expect, + Expires: opt.ObjectCopyHeaderOptions.Expires, + XCosMetaXXX: opt.ObjectCopyHeaderOptions.XCosMetaXXX, + XCosStorageClass: opt.ObjectCopyHeaderOptions.XCosStorageClass, + XCosServerSideEncryption: opt.ObjectCopyHeaderOptions.XCosServerSideEncryption, + XCosSSECustomerAglo: opt.ObjectCopyHeaderOptions.XCosSSECustomerAglo, + XCosSSECustomerKey: opt.ObjectCopyHeaderOptions.XCosSSECustomerKey, + XCosSSECustomerKeyMD5: opt.ObjectCopyHeaderOptions.XCosSSECustomerKeyMD5, + XOptionHeader: opt.ObjectCopyHeaderOptions.XOptionHeader, + } + return optini +} + +func CloneObjectPutOptions(opt *ObjectPutOptions) *ObjectPutOptions { + res := &ObjectPutOptions{ + &ACLHeaderOptions{}, + &ObjectPutHeaderOptions{}, + } + if opt != nil { + if opt.ACLHeaderOptions != nil { + *res.ACLHeaderOptions = *opt.ACLHeaderOptions + } + if opt.ObjectPutHeaderOptions != nil { + *res.ObjectPutHeaderOptions = *opt.ObjectPutHeaderOptions + res.XCosMetaXXX = cloneHeader(opt.XCosMetaXXX) + res.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + } + return res +} + +func CloneInitiateMultipartUploadOptions(opt *InitiateMultipartUploadOptions) *InitiateMultipartUploadOptions { + res := &InitiateMultipartUploadOptions{ + &ACLHeaderOptions{}, + &ObjectPutHeaderOptions{}, + } + if opt != nil { + if opt.ACLHeaderOptions != nil { + *res.ACLHeaderOptions = *opt.ACLHeaderOptions + } + if opt.ObjectPutHeaderOptions != nil { + *res.ObjectPutHeaderOptions = *opt.ObjectPutHeaderOptions + res.XCosMetaXXX = cloneHeader(opt.XCosMetaXXX) + res.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + } + return res +} + +func CloneObjectUploadPartOptions(opt *ObjectUploadPartOptions) *ObjectUploadPartOptions { + var res ObjectUploadPartOptions + if opt != nil { + res = *opt + res.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + return &res +} + +func CloneObjectGetOptions(opt *ObjectGetOptions) *ObjectGetOptions { + var res ObjectGetOptions + if opt != nil { + res = *opt + res.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + return &res +} + +func CloneCompleteMultipartUploadOptions(opt *CompleteMultipartUploadOptions) *CompleteMultipartUploadOptions { + var res CompleteMultipartUploadOptions + if opt != nil { + res.XMLName = opt.XMLName + if len(opt.Parts) > 0 { + res.Parts = make([]Object, len(opt.Parts)) + copy(res.Parts, opt.Parts) + } + res.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + return &res +} + +type RangeOptions struct { + HasStart bool + HasEnd bool + Start int64 + End int64 +} + +func FormatRangeOptions(opt *RangeOptions) string { + if opt == nil { + return "" + } + if opt.HasStart && opt.HasEnd { + return fmt.Sprintf("bytes=%v-%v", opt.Start, opt.End) + } + if opt.HasStart { + return fmt.Sprintf("bytes=%v-", opt.Start) + } + if opt.HasEnd { + return fmt.Sprintf("bytes=-%v", opt.End) + } + return "" +} +func GetRangeOptions(opt *ObjectGetOptions) (*RangeOptions, error) { + if opt == nil || opt.Range == "" { + return nil, nil + } + return GetRange(opt.Range) +} + +func GetRange(rangeStr string) (*RangeOptions, error) { + // bytes=M-N + slices := strings.Split(rangeStr, "=") + if len(slices) 
!= 2 || slices[0] != "bytes" { + return nil, fmt.Errorf("Invalid Parameter Range: %v", rangeStr) + } + // byte=M-N, X-Y + fSlice := strings.Split(slices[1], ",") + rstr := fSlice[0] + + var err error + var ropt RangeOptions + sted := strings.Split(rstr, "-") + if len(sted) != 2 { + return nil, fmt.Errorf("Invalid Parameter Range: %v", rangeStr) + } + // M + if len(sted[0]) > 0 { + ropt.Start, err = strconv.ParseInt(sted[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("Invalid Parameter Range: %v,err: %v", rangeStr, err) + } + ropt.HasStart = true + } + // N + if len(sted[1]) > 0 { + ropt.End, err = strconv.ParseInt(sted[1], 10, 64) + if err != nil || ropt.End == 0 { + return nil, fmt.Errorf("Invalid Parameter Range: %v,err: %v", rangeStr, err) + } + ropt.HasEnd = true + } + return &ropt, nil +} + +var deliverHeader = map[string]bool{} + +func isDeliverHeader(key string) bool { + for k, v := range deliverHeader { + if key == k && v { + return true + } + } + return strings.HasPrefix(key, privateHeaderPrefix) +} + +func deliverInitOptions(opt *InitiateMultipartUploadOptions) (*http.Header, error) { + if opt == nil { + return nil, nil + } + h, err := httpheader.Header(opt) + if err != nil { + return nil, err + } + header := &http.Header{} + for key, values := range h { + key = strings.ToLower(key) + if isDeliverHeader(key) { + for _, value := range values { + header.Add(key, value) + } + } + } + return header, nil +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/object.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/object.go new file mode 100644 index 0000000..bed60d4 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/object.go @@ -0,0 +1,1617 @@ +package cos + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// ObjectService 相关 API +type ObjectService service + +// ObjectGetOptions is the option of GetObject +type ObjectGetOptions struct { + ResponseContentType string `url:"response-content-type,omitempty" header:"-"` + ResponseContentLanguage string `url:"response-content-language,omitempty" header:"-"` + ResponseExpires string `url:"response-expires,omitempty" header:"-"` + ResponseCacheControl string `url:"response-cache-control,omitempty" header:"-"` + ResponseContentDisposition string `url:"response-content-disposition,omitempty" header:"-"` + ResponseContentEncoding string `url:"response-content-encoding,omitempty" header:"-"` + Range string `url:"-" header:"Range,omitempty"` + IfModifiedSince string `url:"-" header:"If-Modified-Since,omitempty"` + // SSE-C + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + + //兼容其他自定义头部 + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` + XCosTrafficLimit int `header:"x-cos-traffic-limit,omitempty" url:"-" xml:"-"` + + // 下载进度, ProgressCompleteEvent不能表示对应API调用成功,API是否调用成功的判断标准为返回err==nil + Listener ProgressListener `header:"-" url:"-" xml:"-"` +} + +// presignedURLTestingOptions is the opt of presigned url +type presignedURLTestingOptions struct { + authTime *AuthTime +} + +// Get Object 请求可以将一个文件(Object)下载至本地。 +// 该操作需要对目标 Object 具有读权限或目标 
Object 对所有人都开放了读权限(公有读)。 +// +// https://www.qcloud.com/document/product/436/7753 +func (s *ObjectService) Get(ctx context.Context, name string, opt *ObjectGetOptions, id ...string) (*Response, error) { + var u string + if len(id) == 1 { + u = fmt.Sprintf("/%s?versionId=%s", encodeURIComponent(name), id[0]) + } else if len(id) == 0 { + u = "/" + encodeURIComponent(name) + } else { + return nil, errors.New("wrong params") + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodGet, + optQuery: opt, + optHeader: opt, + disableCloseBody: true, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + + if opt != nil && opt.Listener != nil { + if err == nil && resp != nil { + if totalBytes, e := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64); e == nil { + resp.Body = TeeReader(resp.Body, nil, totalBytes, opt.Listener) + } + } + } + return resp, err +} + +// GetToFile download the object to local file +func (s *ObjectService) GetToFile(ctx context.Context, name, localpath string, opt *ObjectGetOptions, id ...string) (*Response, error) { + resp, err := s.Get(ctx, name, opt, id...) + if err != nil { + return resp, err + } + defer resp.Body.Close() + + // If file exist, overwrite it + fd, err := os.OpenFile(localpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660) + if err != nil { + return resp, err + } + + _, err = io.Copy(fd, resp.Body) + fd.Close() + if err != nil { + return resp, err + } + + return resp, nil +} + +func (s *ObjectService) GetObjectURL(name string) *url.URL { + uri, _ := url.Parse("/" + encodeURIComponent(name, []byte{'/'})) + return s.client.BaseURL.BucketURL.ResolveReference(uri) +} + +type PresignedURLOptions struct { + Query *url.Values `xml:"-" url:"-" header:"-"` + Header *http.Header `header:"-,omitempty" url:"-" xml:"-"` + SignMerged bool `xml:"-" url:"-" header:"-"` +} + +// GetPresignedURL get the object presigned to down or upload file by url +// 预签名函数,signHost: 默认签入Header Host, 您也可以选择不签入Header Host,但可能导致请求失败或安全漏洞 +func (s *ObjectService) GetPresignedURL(ctx context.Context, httpMethod, name, ak, sk string, expired time.Duration, opt interface{}, signHost ...bool) (*url.URL, error) { + // 兼容 name 以 / 开头的情况 + if strings.HasPrefix(name, "/") { + name = encodeURIComponent("/") + encodeURIComponent(name[1:], []byte{'/'}) + } else { + name = encodeURIComponent(name, []byte{'/'}) + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + name, + method: httpMethod, + optQuery: opt, + optHeader: opt, + } + if popt, ok := opt.(*PresignedURLOptions); ok { + if popt != nil && popt.Query != nil { + qs := popt.Query.Encode() + if qs != "" { + sendOpt.uri = fmt.Sprintf("%s?%s", sendOpt.uri, qs) + } + } + } + req, err := s.client.newRequest(ctx, sendOpt.baseURL, sendOpt.uri, sendOpt.method, sendOpt.body, sendOpt.optQuery, sendOpt.optHeader) + if err != nil { + return nil, err + } + + var authTime *AuthTime + if opt != nil { + if opt, ok := opt.(*presignedURLTestingOptions); ok { + authTime = opt.authTime + } + } + if authTime == nil { + authTime = NewAuthTime(expired) + } + signedHost := true + if len(signHost) > 0 { + signedHost = signHost[0] + } + authorization := newAuthorization(ak, sk, req, authTime, signedHost) + if opt != nil { + if opt, ok := opt.(*PresignedURLOptions); ok { + if opt.SignMerged { + sign := encodeURIComponent(authorization) + if req.URL.RawQuery == "" { + req.URL.RawQuery = fmt.Sprintf("sign=%s", sign) + } else { + req.URL.RawQuery = fmt.Sprintf("%s&sign=%s", req.URL.RawQuery, sign) + 
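// with SignMerged the whole authorization string is carried in a single "sign" query parameter
+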
} + return req.URL, nil + } + } + } + sign := encodeURIComponent(authorization, []byte{'&', '='}) + + if req.URL.RawQuery == "" { + req.URL.RawQuery = fmt.Sprintf("%s", sign) + } else { + req.URL.RawQuery = fmt.Sprintf("%s&%s", req.URL.RawQuery, sign) + } + return req.URL, nil +} + +func (s *ObjectService) GetSignature(ctx context.Context, httpMethod, name, ak, sk string, expired time.Duration, opt *PresignedURLOptions, signHost ...bool) string { + // 兼容 name 以 / 开头的情况 + if strings.HasPrefix(name, "/") { + name = encodeURIComponent("/") + encodeURIComponent(name[1:], []byte{'/'}) + } else { + name = encodeURIComponent(name, []byte{'/'}) + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + name, + method: httpMethod, + optQuery: opt, + optHeader: opt, + } + if opt != nil && opt.Query != nil { + qs := opt.Query.Encode() + if qs != "" { + sendOpt.uri = fmt.Sprintf("%s?%s", sendOpt.uri, qs) + } + } + req, err := s.client.newRequest(ctx, sendOpt.baseURL, sendOpt.uri, sendOpt.method, sendOpt.body, sendOpt.optQuery, sendOpt.optHeader) + if err != nil { + return "" + } + + authTime := NewAuthTime(expired) + signedHost := true + if len(signHost) > 0 { + signedHost = signHost[0] + } + authorization := newAuthorization(ak, sk, req, authTime, signedHost) + return authorization +} + +// ObjectPutHeaderOptions the options of header of the put object +type ObjectPutHeaderOptions struct { + CacheControl string `header:"Cache-Control,omitempty" url:"-"` + ContentDisposition string `header:"Content-Disposition,omitempty" url:"-"` + ContentEncoding string `header:"Content-Encoding,omitempty" url:"-"` + ContentType string `header:"Content-Type,omitempty" url:"-"` + ContentMD5 string `header:"Content-MD5,omitempty" url:"-"` + ContentLength int64 `header:"Content-Length,omitempty" url:"-"` + ContentLanguage string `header:"Content-Language,omitempty" url:"-"` + Expect string `header:"Expect,omitempty" url:"-"` + Expires string `header:"Expires,omitempty" url:"-"` + XCosContentSHA1 string `header:"x-cos-content-sha1,omitempty" url:"-"` + // 自定义的 x-cos-meta-* header + XCosMetaXXX *http.Header `header:"x-cos-meta-*,omitempty" url:"-"` + XCosStorageClass string `header:"x-cos-storage-class,omitempty" url:"-"` + // 可选值: Normal, Appendable + //XCosObjectType string `header:"x-cos-object-type,omitempty" url:"-"` + // Enable Server Side Encryption, Only supported: AES256 + XCosServerSideEncryption string `header:"x-cos-server-side-encryption,omitempty" url:"-" xml:"-"` + // SSE-C + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + //兼容其他自定义头部 + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` + XCosTrafficLimit int `header:"x-cos-traffic-limit,omitempty" url:"-" xml:"-"` + + // 上传进度, ProgressCompleteEvent不能表示对应API调用成功,API是否调用成功的判断标准为返回err==nil + Listener ProgressListener `header:"-" url:"-" xml:"-"` +} + +// ObjectPutOptions the options of put object +type ObjectPutOptions struct { + *ACLHeaderOptions `header:",omitempty" url:"-" xml:"-"` + *ObjectPutHeaderOptions `header:",omitempty" url:"-" xml:"-"` +} + +// Put Object请求可以将一个文件(Oject)上传至指定Bucket。 +// +// https://www.qcloud.com/document/product/436/7749 +func (s *ObjectService) Put(ctx context.Context, name string, r io.Reader, uopt 
*ObjectPutOptions) (*Response, error) { + if r == nil { + return nil, fmt.Errorf("reader is nil") + } + if err := CheckReaderLen(r); err != nil { + return nil, err + } + opt := CloneObjectPutOptions(uopt) + totalBytes, err := GetReaderLen(r) + if err != nil && opt != nil && opt.Listener != nil { + if opt.ContentLength == 0 { + return nil, err + } + totalBytes = opt.ContentLength + } + if err == nil { + // 与 go http 保持一致, 非bytes.Buffer/bytes.Reader/strings.Reader由用户指定ContentLength, 或使用 Chunk 上传 + if opt != nil && opt.ContentLength == 0 && IsLenReader(r) { + opt.ContentLength = totalBytes + } + } + reader := TeeReader(r, nil, totalBytes, nil) + if s.client.Conf.EnableCRC { + reader.writer = crc64.New(crc64.MakeTable(crc64.ECMA)) + } + if opt != nil && opt.Listener != nil { + reader.listener = opt.Listener + } + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name), + method: http.MethodPut, + body: reader, + optHeader: opt, + } + resp, err := s.client.send(ctx, &sendOpt) + + return resp, err +} + +// PutFromFile put object from local file +func (s *ObjectService) PutFromFile(ctx context.Context, name string, filePath string, opt *ObjectPutOptions) (resp *Response, err error) { + nr := 0 + for nr < 3 { + fd, e := os.Open(filePath) + if e != nil { + err = e + return + } + resp, err = s.Put(ctx, name, fd, opt) + if err != nil { + nr++ + fd.Close() + continue + } + fd.Close() + break + } + return +} + +// ObjectCopyHeaderOptions is the head option of the Copy +type ObjectCopyHeaderOptions struct { + // When use replace directive to update meta infos + CacheControl string `header:"Cache-Control,omitempty" url:"-"` + ContentDisposition string `header:"Content-Disposition,omitempty" url:"-"` + ContentEncoding string `header:"Content-Encoding,omitempty" url:"-"` + ContentLanguage string `header:"Content-Language,omitempty" url:"-"` + ContentType string `header:"Content-Type,omitempty" url:"-"` + Expires string `header:"Expires,omitempty" url:"-"` + Expect string `header:"Expect,omitempty" url:"-"` + XCosMetadataDirective string `header:"x-cos-metadata-directive,omitempty" url:"-" xml:"-"` + XCosCopySourceIfModifiedSince string `header:"x-cos-copy-source-If-Modified-Since,omitempty" url:"-" xml:"-"` + XCosCopySourceIfUnmodifiedSince string `header:"x-cos-copy-source-If-Unmodified-Since,omitempty" url:"-" xml:"-"` + XCosCopySourceIfMatch string `header:"x-cos-copy-source-If-Match,omitempty" url:"-" xml:"-"` + XCosCopySourceIfNoneMatch string `header:"x-cos-copy-source-If-None-Match,omitempty" url:"-" xml:"-"` + XCosStorageClass string `header:"x-cos-storage-class,omitempty" url:"-" xml:"-"` + // 自定义的 x-cos-meta-* header + XCosMetaXXX *http.Header `header:"x-cos-meta-*,omitempty" url:"-"` + XCosCopySource string `header:"x-cos-copy-source" url:"-" xml:"-"` + XCosServerSideEncryption string `header:"x-cos-server-side-encryption,omitempty" url:"-" xml:"-"` + // SSE-C + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + XCosCopySourceSSECustomerAglo string `header:"x-cos-copy-source-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosCopySourceSSECustomerKey string `header:"x-cos-copy-source-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + 
XCosCopySourceSSECustomerKeyMD5 string `header:"x-cos-copy-source-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + //兼容其他自定义头部 + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` +} + +// ObjectCopyOptions is the option of Copy, choose header or body +type ObjectCopyOptions struct { + *ObjectCopyHeaderOptions `header:",omitempty" url:"-" xml:"-"` + *ACLHeaderOptions `header:",omitempty" url:"-" xml:"-"` +} + +// ObjectCopyResult is the result of Copy +type ObjectCopyResult struct { + XMLName xml.Name `xml:"CopyObjectResult"` + ETag string `xml:"ETag,omitempty"` + LastModified string `xml:"LastModified,omitempty"` + CRC64 string `xml:"CRC64,omitempty"` + VersionId string `xml:"VersionId,omitempty"` +} + +// Copy 调用 PutObjectCopy 请求实现将一个文件从源路径复制到目标路径。建议文件大小 1M 到 5G, +// 超过 5G 的文件请使用分块上传 Upload - Copy。在拷贝的过程中,文件元属性和 ACL 可以被修改。 +// +// 用户可以通过该接口实现文件移动,文件重命名,修改文件属性和创建副本。 +// +// 注意:在跨帐号复制的时候,需要先设置被复制文件的权限为公有读,或者对目标帐号赋权,同帐号则不需要。 +// +// https://cloud.tencent.com/document/product/436/10881 +func (s *ObjectService) Copy(ctx context.Context, name, sourceURL string, opt *ObjectCopyOptions, id ...string) (*ObjectCopyResult, *Response, error) { + surl := strings.SplitN(sourceURL, "/", 2) + if len(surl) < 2 { + return nil, nil, errors.New(fmt.Sprintf("x-cos-copy-source format error: %s", sourceURL)) + } + var u string + if len(id) == 1 { + u = fmt.Sprintf("%s/%s?versionId=%s", surl[0], encodeURIComponent(surl[1]), id[0]) + } else if len(id) == 0 { + u = fmt.Sprintf("%s/%s", surl[0], encodeURIComponent(surl[1])) + } else { + return nil, nil, errors.New("wrong params") + } + + var res ObjectCopyResult + copyOpt := &ObjectCopyOptions{ + &ObjectCopyHeaderOptions{}, + &ACLHeaderOptions{}, + } + if opt != nil { + if opt.ObjectCopyHeaderOptions != nil { + *copyOpt.ObjectCopyHeaderOptions = *opt.ObjectCopyHeaderOptions + } + if opt.ACLHeaderOptions != nil { + *copyOpt.ACLHeaderOptions = *opt.ACLHeaderOptions + } + } + copyOpt.XCosCopySource = u + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name), + method: http.MethodPut, + body: nil, + optHeader: copyOpt, + result: &res, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + // If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. 
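+	// An empty ETag in a 200 response is treated as such an embedded error.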
+ if err == nil && resp.StatusCode == 200 { + if res.ETag == "" { + return &res, resp, errors.New("response 200 OK, but body contains an error") + } + } + return &res, resp, err +} + +type ObjectDeleteOptions struct { + // SSE-C + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + //兼容其他自定义头部 + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` + VersionId string `header:"-" url:"VersionId,omitempty" xml:"-"` +} + +// Delete Object请求可以将一个文件(Object)删除。 +// +// https://www.qcloud.com/document/product/436/7743 +func (s *ObjectService) Delete(ctx context.Context, name string, opt ...*ObjectDeleteOptions) (*Response, error) { + var optHeader *ObjectDeleteOptions + // When use "" string might call the delete bucket interface + if len(name) == 0 || name == "/" { + return nil, errors.New("empty object name") + } + if len(opt) > 0 { + optHeader = opt[0] + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name), + method: http.MethodDelete, + optHeader: optHeader, + optQuery: optHeader, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + return resp, err +} + +// ObjectHeadOptions is the option of HeadObject +type ObjectHeadOptions struct { + IfModifiedSince string `url:"-" header:"If-Modified-Since,omitempty"` + // SSE-C + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"` +} + +// Head Object请求可以取回对应Object的元数据,Head的权限与Get的权限一致 +// +// https://www.qcloud.com/document/product/436/7745 +func (s *ObjectService) Head(ctx context.Context, name string, opt *ObjectHeadOptions, id ...string) (*Response, error) { + var u string + if len(id) == 1 { + u = fmt.Sprintf("/%s?versionId=%s", encodeURIComponent(name), id[0]) + } else if len(id) == 0 { + u = "/" + encodeURIComponent(name) + } else { + return nil, errors.New("wrong params") + } + + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodHead, + optHeader: opt, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + if resp != nil && resp.Header["X-Cos-Object-Type"] != nil && resp.Header["X-Cos-Object-Type"][0] == "appendable" { + resp.Header.Add("x-cos-next-append-position", resp.Header["Content-Length"][0]) + } + + return resp, err +} + +func (s *ObjectService) IsExist(ctx context.Context, name string, id ...string) (bool, error) { + _, err := s.Head(ctx, name, nil, id...) 
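+	// Existence is inferred from HEAD: success means the object exists,
+	// a 404 maps to (false, nil), and any other failure is returned as an error.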
+ if err == nil { + return true, nil + } + if IsNotFoundError(err) { + return false, nil + } + return false, err +} + +// ObjectOptionsOptions is the option of object options +type ObjectOptionsOptions struct { + Origin string `url:"-" header:"Origin"` + AccessControlRequestMethod string `url:"-" header:"Access-Control-Request-Method"` + AccessControlRequestHeaders string `url:"-" header:"Access-Control-Request-Headers,omitempty"` +} + +// Options Object请求实现跨域访问的预请求。即发出一个 OPTIONS 请求给服务器以确认是否可以进行跨域操作。 +// +// 当CORS配置不存在时,请求返回403 Forbidden。 +// +// https://www.qcloud.com/document/product/436/8288 +func (s *ObjectService) Options(ctx context.Context, name string, opt *ObjectOptionsOptions) (*Response, error) { + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name), + method: http.MethodOptions, + optHeader: opt, + } + resp, err := s.client.send(ctx, &sendOpt) + return resp, err +} + +// CASJobParameters support three way: Standard(in 35 hours), Expedited(quick way, in 15 mins), Bulk(in 5-12 hours_ +type CASJobParameters struct { + Tier string `xml:"Tier" header:"-" url:"-"` +} + +// ObjectRestoreOptions is the option of object restore +type ObjectRestoreOptions struct { + XMLName xml.Name `xml:"RestoreRequest" header:"-" url:"-"` + Days int `xml:"Days" header:"-" url:"-"` + Tier *CASJobParameters `xml:"CASJobParameters" header:"-" url:"-"` + XOptionHeader *http.Header `xml:"-" header:",omitempty" url:"-"` +} + +// PutRestore API can recover an object of type archived by COS archive. +// +// https://cloud.tencent.com/document/product/436/12633 +func (s *ObjectService) PostRestore(ctx context.Context, name string, opt *ObjectRestoreOptions) (*Response, error) { + u := fmt.Sprintf("/%s?restore", encodeURIComponent(name)) + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodPost, + body: opt, + optHeader: opt, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + + return resp, err +} + +// Append请求可以将一个文件(Object)以分块追加的方式上传至 Bucket 中。使用Append Upload的文件必须事前被设定为Appendable。 +// 当Appendable的文件被执行Put Object的操作以后,文件被覆盖,属性改变为Normal。 +// +// 文件属性可以在Head Object操作中被查询到,当您发起Head Object请求时,会返回自定义Header『x-cos-object-type』,该Header只有两个枚举值:Normal或者Appendable。 +// +// 追加上传建议文件大小1M - 5G。如果position的值和当前Object的长度不致,COS会返回409错误。 +// 如果Append一个Normal的Object,COS会返回409 ObjectNotAppendable。 +// +// Appendable的文件不可以被复制,不参与版本管理,不参与生命周期管理,不可跨区域复制。 +// +// 当 r 不是 bytes.Buffer/bytes.Reader/strings.Reader 时,必须指定 opt.ObjectPutHeaderOptions.ContentLength +// +// https://www.qcloud.com/document/product/436/7741 +func (s *ObjectService) Append(ctx context.Context, name string, position int, r io.Reader, opt *ObjectPutOptions) (int, *Response, error) { + res := position + if r == nil { + return res, nil, fmt.Errorf("reader is nil") + } + if err := CheckReaderLen(r); err != nil { + return res, nil, err + } + opt = CloneObjectPutOptions(opt) + totalBytes, err := GetReaderLen(r) + if err != nil && opt != nil && opt.Listener != nil { + if opt.ContentLength == 0 { + return res, nil, err + } + totalBytes = opt.ContentLength + } + if err == nil { + // 与 go http 保持一致, 非bytes.Buffer/bytes.Reader/strings.Reader需用户指定ContentLength + if opt != nil && opt.ContentLength == 0 && IsLenReader(r) { + opt.ContentLength = totalBytes + } + } + reader := TeeReader(r, nil, totalBytes, nil) + if s.client.Conf.EnableCRC { + reader.writer = md5.New() // MD5校验 + reader.disableCheckSum = true + } + if opt != nil && opt.Listener != nil { + reader.listener = opt.Listener + 
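// progress events are emitted through the listener as the request body is read
+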
} + u := fmt.Sprintf("/%s?append&position=%d", encodeURIComponent(name), position) + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodPost, + optHeader: opt, + body: reader, + } + resp, err := s.client.send(ctx, &sendOpt) + + if err == nil { + // 数据校验 + if s.client.Conf.EnableCRC && reader.writer != nil { + wanted := hex.EncodeToString(reader.Sum()) + if wanted != resp.Header.Get("x-cos-content-sha1") { + return res, resp, fmt.Errorf("append verification failed, want:%v, return:%v", wanted, resp.Header.Get("x-cos-content-sha1")) + } + } + np, err := strconv.ParseInt(resp.Header.Get("x-cos-next-append-position"), 10, 64) + return int(np), resp, err + } + return res, resp, err +} + +// ObjectDeleteMultiOptions is the option of DeleteMulti +type ObjectDeleteMultiOptions struct { + XMLName xml.Name `xml:"Delete" header:"-"` + Quiet bool `xml:"Quiet" header:"-"` + Objects []Object `xml:"Object" header:"-"` + //XCosSha1 string `xml:"-" header:"x-cos-sha1"` +} + +// ObjectDeleteMultiResult is the result of DeleteMulti +type ObjectDeleteMultiResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []Object `xml:"Deleted,omitempty"` + Errors []struct { + Key string `xml:",omitempty"` + Code string `xml:",omitempty"` + Message string `xml:",omitempty"` + VersionId string `xml:",omitempty"` + } `xml:"Error,omitempty"` +} + +// DeleteMulti 请求实现批量删除文件,最大支持单次删除1000个文件。 +// 对于返回结果,COS提供Verbose和Quiet两种结果模式。Verbose模式将返回每个Object的删除结果; +// Quiet模式只返回报错的Object信息。 +// https://www.qcloud.com/document/product/436/8289 +func (s *ObjectService) DeleteMulti(ctx context.Context, opt *ObjectDeleteMultiOptions) (*ObjectDeleteMultiResult, *Response, error) { + var res ObjectDeleteMultiResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/?delete", + method: http.MethodPost, + body: opt, + result: &res, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + return &res, resp, err +} + +// Object is the meta info of the object +type Object struct { + Key string `xml:",omitempty"` + ETag string `xml:",omitempty"` + Size int64 `xml:",omitempty"` + PartNumber int `xml:",omitempty"` + LastModified string `xml:",omitempty"` + StorageClass string `xml:",omitempty"` + Owner *Owner `xml:",omitempty"` + VersionId string `xml:",omitempty"` +} + +// MultiUploadOptions is the option of the multiupload, +// ThreadPoolSize default is one +type MultiUploadOptions struct { + OptIni *InitiateMultipartUploadOptions + PartSize int64 + ThreadPoolSize int + CheckPoint bool + DisableChecksum bool +} + +type MultiDownloadOptions struct { + Opt *ObjectGetOptions + PartSize int64 + ThreadPoolSize int + CheckPoint bool + CheckPointFile string + DisableChecksum bool +} + +type MultiDownloadCPInfo struct { + Size int64 `json:"contentLength,omitempty"` + ETag string `json:"eTag,omitempty"` + CRC64 string `json:"crc64ecma,omitempty"` + LastModified string `json:"lastModified,omitempty"` + DownloadedBlocks []DownloadedBlock `json:"downloadedBlocks,omitempty"` +} +type DownloadedBlock struct { + From int64 `json:"from,omitempty"` + To int64 `json:"to,omitempty"` +} + +type Chunk struct { + Number int + OffSet int64 + Size int64 + Done bool + ETag string +} + +// jobs +type Jobs struct { + Name string + UploadId string + FilePath string + RetryTimes int + VersionId []string + Chunk Chunk + Data io.Reader + Opt *ObjectUploadPartOptions + DownOpt *ObjectGetOptions +} + +type Results struct { + PartNumber int + Resp *Response + err error +} + +func LimitReadCloser(r 
io.Reader, n int64) io.Reader { + var lc LimitedReadCloser + lc.R = r + lc.N = n + return &lc +} + +type LimitedReadCloser struct { + io.LimitedReader +} + +func (lc *LimitedReadCloser) Close() error { + if r, ok := lc.R.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +type DiscardReadCloser struct { + RC io.ReadCloser + Discard int +} + +func (drc *DiscardReadCloser) Read(data []byte) (int, error) { + n, err := drc.RC.Read(data) + if drc.Discard == 0 || n <= 0 { + return n, err + } + + if n <= drc.Discard { + drc.Discard -= n + return 0, err + } + + realLen := n - drc.Discard + copy(data[0:realLen], data[drc.Discard:n]) + drc.Discard = 0 + return realLen, err +} + +func (drc *DiscardReadCloser) Close() error { + if rc, ok := drc.RC.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} + +func worker(ctx context.Context, s *ObjectService, jobs <-chan *Jobs, results chan<- *Results) { + for j := range jobs { + j.Opt.ContentLength = j.Chunk.Size + + rt := j.RetryTimes + for { + // http.Request.Body can be Closed in request + fd, err := os.Open(j.FilePath) + var res Results + if err != nil { + res.err = err + res.PartNumber = j.Chunk.Number + res.Resp = nil + results <- &res + break + } + fd.Seek(j.Chunk.OffSet, os.SEEK_SET) + resp, err := s.UploadPart(ctx, j.Name, j.UploadId, j.Chunk.Number, + LimitReadCloser(fd, j.Chunk.Size), j.Opt) + res.PartNumber = j.Chunk.Number + res.Resp = resp + res.err = err + if err != nil { + rt-- + if rt == 0 { + results <- &res + break + } + time.Sleep(time.Millisecond) + continue + } + results <- &res + break + } + } +} + +func downloadWorker(ctx context.Context, s *ObjectService, jobs <-chan *Jobs, results chan<- *Results) { + for j := range jobs { + opt := &RangeOptions{ + HasStart: true, + HasEnd: true, + Start: j.Chunk.OffSet, + End: j.Chunk.OffSet + j.Chunk.Size - 1, + } + j.DownOpt.Range = FormatRangeOptions(opt) + rt := j.RetryTimes + for { + var res Results + res.PartNumber = j.Chunk.Number + resp, err := s.Get(ctx, j.Name, j.DownOpt, j.VersionId...) 
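+			// the ranged GET above requests exactly this chunk's bytes
+			// (OffSet .. OffSet+Size-1); on success they are written into
+			// the target file at the same offset further down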
+ res.err = err + res.Resp = resp + if err != nil { + results <- &res + break + } + fd, err := os.OpenFile(j.FilePath, os.O_WRONLY, 0660) + if err != nil { + resp.Body.Close() + res.err = err + results <- &res + break + } + fd.Seek(j.Chunk.OffSet, os.SEEK_SET) + n, err := io.Copy(fd, LimitReadCloser(resp.Body, j.Chunk.Size)) + if n != j.Chunk.Size || err != nil { + fd.Close() + resp.Body.Close() + rt-- + if rt == 0 { + res.err = fmt.Errorf("io.Copy Failed, nread:%v, want:%v, err:%v", n, j.Chunk.Size, err) + results <- &res + break + } + time.Sleep(time.Millisecond) + continue + } + fd.Close() + resp.Body.Close() + results <- &res + break + } + } +} + +func DividePart(fileSize int64, last int) (int64, int64) { + partSize := int64(last * 1024 * 1024) + partNum := fileSize / partSize + for partNum >= 10000 { + partSize = partSize * 2 + partNum = fileSize / partSize + } + return partNum, partSize +} + +func SplitFileIntoChunks(filePath string, partSize int64) (int64, []Chunk, int, error) { + if filePath == "" { + return 0, nil, 0, errors.New("filePath invalid") + } + + file, err := os.Open(filePath) + if err != nil { + return 0, nil, 0, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return 0, nil, 0, err + } + var partNum int64 + if partSize > 0 { + if partSize < 1024*1024 { + return 0, nil, 0, errors.New("partSize>=1048576 is required") + } + partNum = stat.Size() / partSize + if partNum >= 10000 { + return 0, nil, 0, errors.New("Too many parts, out of 10000") + } + } else { + partNum, partSize = DividePart(stat.Size(), 16) + } + + var chunks []Chunk + var chunk = Chunk{} + for i := int64(0); i < partNum; i++ { + chunk.Number = int(i + 1) + chunk.OffSet = i * partSize + chunk.Size = partSize + chunks = append(chunks, chunk) + } + + if stat.Size()%partSize > 0 { + chunk.Number = len(chunks) + 1 + chunk.OffSet = int64(len(chunks)) * partSize + chunk.Size = stat.Size() % partSize + chunks = append(chunks, chunk) + partNum++ + } + + return int64(stat.Size()), chunks, int(partNum), nil + +} + +func (s *ObjectService) getResumableUploadID(ctx context.Context, name string) (string, error) { + opt := &ObjectListUploadsOptions{ + Prefix: name, + EncodingType: "url", + } + res, _, err := s.ListUploads(ctx, opt) + if err != nil { + return "", err + } + if len(res.Upload) == 0 { + return "", nil + } + last := len(res.Upload) - 1 + for last >= 0 { + decodeKey, _ := decodeURIComponent(res.Upload[last].Key) + if decodeKey == name { + return decodeURIComponent(res.Upload[last].UploadID) + } + last = last - 1 + } + return "", nil +} + +func (s *ObjectService) checkUploadedParts(ctx context.Context, name, UploadID, filepath string, chunks []Chunk, partNum int) error { + var uploadedParts []Object + isTruncated := true + opt := &ObjectListPartsOptions{ + EncodingType: "url", + } + for isTruncated { + res, _, err := s.ListParts(ctx, name, UploadID, opt) + if err != nil { + return err + } + if len(res.Parts) > 0 { + uploadedParts = append(uploadedParts, res.Parts...) 
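+			// accumulated parts are later compared, MD5 by MD5, against
+			// the local file so only verified chunks are marked Done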
+ } + isTruncated = res.IsTruncated + opt.PartNumberMarker = res.NextPartNumberMarker + } + fd, err := os.Open(filepath) + if err != nil { + return err + } + defer fd.Close() + // 某个分块出错, 重置chunks + ret := func(e error) error { + for i, _ := range chunks { + chunks[i].Done = false + chunks[i].ETag = "" + } + return e + } + for _, part := range uploadedParts { + partNumber := part.PartNumber + if partNumber > partNum { + return ret(errors.New("Part Number is not consistent")) + } + partNumber = partNumber - 1 + fd.Seek(chunks[partNumber].OffSet, os.SEEK_SET) + bs, err := ioutil.ReadAll(io.LimitReader(fd, chunks[partNumber].Size)) + if err != nil { + return ret(err) + } + localMD5 := fmt.Sprintf("\"%x\"", md5.Sum(bs)) + if localMD5 != part.ETag { + return ret(errors.New(fmt.Sprintf("CheckSum Failed in Part[%d]", part.PartNumber))) + } + chunks[partNumber].Done = true + chunks[partNumber].ETag = part.ETag + } + return nil +} + +// MultiUpload/Upload 为高级upload接口,并发分块上传 +// +// 当 partSize > 0 时,由调用者指定分块大小,否则由 SDK 自动切分,单位为MB +// 由调用者指定分块大小时,请确认分块数量不超过10000 +// +func (s *ObjectService) MultiUpload(ctx context.Context, name string, filepath string, opt *MultiUploadOptions) (*CompleteMultipartUploadResult, *Response, error) { + return s.Upload(ctx, name, filepath, opt) +} + +func (s *ObjectService) Upload(ctx context.Context, name string, filepath string, opt *MultiUploadOptions) (*CompleteMultipartUploadResult, *Response, error) { + if opt == nil { + opt = &MultiUploadOptions{} + } + var localcrc uint64 + // 1.Get the file chunk + totalBytes, chunks, partNum, err := SplitFileIntoChunks(filepath, opt.PartSize*1024*1024) + if err != nil { + return nil, nil, err + } + // 校验 + if s.client.Conf.EnableCRC && !opt.DisableChecksum { + fd, err := os.Open(filepath) + if err != nil { + return nil, nil, err + } + defer fd.Close() + localcrc, err = calCRC64(fd) + if err != nil { + return nil, nil, err + } + } + // filesize=0 , use simple upload + if partNum == 0 || partNum == 1 { + var opt0 *ObjectPutOptions + if opt.OptIni != nil { + opt0 = &ObjectPutOptions{ + opt.OptIni.ACLHeaderOptions, + opt.OptIni.ObjectPutHeaderOptions, + } + } + rsp, err := s.PutFromFile(ctx, name, filepath, opt0) + if err != nil { + return nil, rsp, err + } + result := &CompleteMultipartUploadResult{ + Location: fmt.Sprintf("%s/%s", s.client.BaseURL.BucketURL, name), + Key: name, + ETag: rsp.Header.Get("ETag"), + } + if rsp != nil && s.client.Conf.EnableCRC && !opt.DisableChecksum { + scoscrc := rsp.Header.Get("x-cos-hash-crc64ecma") + icoscrc, _ := strconv.ParseUint(scoscrc, 10, 64) + if icoscrc != localcrc { + return result, rsp, fmt.Errorf("verification failed, want:%v, return:%v", localcrc, icoscrc) + } + } + return result, rsp, nil + } + + var uploadID string + resumableFlag := false + if opt.CheckPoint { + var err error + uploadID, err = s.getResumableUploadID(ctx, name) + if err == nil && uploadID != "" { + err = s.checkUploadedParts(ctx, name, uploadID, filepath, chunks, partNum) + resumableFlag = (err == nil) + } + } + + // 2.Init + optini := opt.OptIni + if !resumableFlag { + res, _, err := s.InitiateMultipartUpload(ctx, name, optini) + if err != nil { + return nil, nil, err + } + uploadID = res.UploadID + } + var poolSize int + if opt.ThreadPoolSize > 0 { + poolSize = opt.ThreadPoolSize + } else { + // Default is one + poolSize = 1 + } + + chjobs := make(chan *Jobs, 100) + chresults := make(chan *Results, 10000) + optcom := &CompleteMultipartUploadOptions{} + + // 3.Start worker + for w := 1; w <= poolSize; w++ { + go 
worker(ctx, s, chjobs, chresults)
+	}
+
+	// emit the progress-started event
+	var listener ProgressListener
+	var consumedBytes int64
+	if opt.OptIni != nil {
+		if opt.OptIni.ObjectPutHeaderOptions != nil {
+			listener = opt.OptIni.Listener
+		}
+		optcom.XOptionHeader, _ = deliverInitOptions(opt.OptIni)
+	}
+	event := newProgressEvent(ProgressStartedEvent, 0, 0, totalBytes)
+	progressCallback(listener, event)
+
+	// 4.Push jobs
+	go func() {
+		for _, chunk := range chunks {
+			if chunk.Done {
+				continue
+			}
+			partOpt := &ObjectUploadPartOptions{}
+			if optini != nil && optini.ObjectPutHeaderOptions != nil {
+				partOpt.XCosSSECustomerAglo = optini.XCosSSECustomerAglo
+				partOpt.XCosSSECustomerKey = optini.XCosSSECustomerKey
+				partOpt.XCosSSECustomerKeyMD5 = optini.XCosSSECustomerKeyMD5
+				partOpt.XCosTrafficLimit = optini.XCosTrafficLimit
+			}
+			job := &Jobs{
+				Name:       name,
+				RetryTimes: 3,
+				FilePath:   filepath,
+				UploadId:   uploadID,
+				Chunk:      chunk,
+				Opt:        partOpt,
+			}
+			chjobs <- job
+		}
+		close(chjobs)
+	}()
+
+	// 5.Recv each part's ETag to complete the upload
+	err = nil
+	for i := 0; i < partNum; i++ {
+		if chunks[i].Done {
+			optcom.Parts = append(optcom.Parts, Object{
+				PartNumber: chunks[i].Number, ETag: chunks[i].ETag},
+			)
+			if err == nil {
+				consumedBytes += chunks[i].Size
+				event = newProgressEvent(ProgressDataEvent, chunks[i].Size, consumedBytes, totalBytes)
+				progressCallback(listener, event)
+			}
+			continue
+		}
+		res := <-chresults
+		// Note: a failed part yields no ETag.
+		if res.Resp == nil || res.err != nil {
+			// This part failed, so its response headers are unavailable.
+			err = fmt.Errorf("UploadID %s, part %d failed to get resp content. error: %s", uploadID, res.PartNumber, res.err.Error())
+			continue
+		}
+		// Success path: record this part's ETag.
+		etag := res.Resp.Header.Get("ETag")
+		optcom.Parts = append(optcom.Parts, Object{
+			PartNumber: res.PartNumber, ETag: etag},
+		)
+		if err == nil {
+			consumedBytes += chunks[res.PartNumber-1].Size
+			event = newProgressEvent(ProgressDataEvent, chunks[res.PartNumber-1].Size, consumedBytes, totalBytes)
+			progressCallback(listener, event)
+		}
+	}
+	close(chresults)
+	if err != nil {
+		event = newProgressEvent(ProgressFailedEvent, 0, consumedBytes, totalBytes, err)
+		progressCallback(listener, event)
+		return nil, nil, err
+	}
+	sort.Sort(ObjectList(optcom.Parts))
+
+	event = newProgressEvent(ProgressCompletedEvent, 0, consumedBytes, totalBytes)
+	progressCallback(listener, event)
+
+	v, resp, err := s.CompleteMultipartUpload(context.Background(), name, uploadID, optcom)
+	if err != nil {
+		return v, resp, err
+	}
+
+	if resp != nil && s.client.Conf.EnableCRC && !opt.DisableChecksum {
+		scoscrc := resp.Header.Get("x-cos-hash-crc64ecma")
+		icoscrc, err := strconv.ParseUint(scoscrc, 10, 64)
+		if icoscrc != localcrc {
+			return v, resp, fmt.Errorf("verification failed, want:%v, return:%v, x-cos-hash-crc64ecma: %v, err:%v", localcrc, icoscrc, scoscrc, err)
+		}
+	}
+	return v, resp, err
+}
+
+func SplitSizeIntoChunks(totalBytes int64, partSize int64) ([]Chunk, int, error) {
+	var partNum int64
+	if partSize > 0 {
+		if partSize < 1024*1024 {
+			return nil, 0, errors.New("partSize>=1048576 is required")
+		}
+		partNum = totalBytes / partSize
+		if partNum >= 10000 {
+			return nil, 0, errors.New("Too many parts, out of 10000")
+		}
+	} else {
+		partNum, partSize = DividePart(totalBytes, 16)
+	}
+
+	var chunks []Chunk
+	var chunk = Chunk{}
+	for i := int64(0); i < partNum; i++ {
+		chunk.Number = int(i + 1)
+		chunk.OffSet = i * partSize
+		chunk.Size = partSize
+		chunks = append(chunks, chunk)
+	}
+
+	if totalBytes%partSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.OffSet = int64(len(chunks)) * partSize
+		chunk.Size = totalBytes % partSize
+		chunks = append(chunks, chunk)
+		partNum++
+	}
+
+	return chunks, int(partNum), nil
+}
+
+func (s *ObjectService) checkDownloadedParts(opt *MultiDownloadCPInfo, chfile string, chunks []Chunk) (*MultiDownloadCPInfo, bool) {
+	var defaultRes MultiDownloadCPInfo
+	defaultRes = *opt
+
+	fd, err := os.Open(chfile)
+	// the checkpoint file does not exist yet
+	if err != nil && os.IsNotExist(err) {
+		// create the checkpoint file
+		fd, _ = os.OpenFile(chfile, os.O_RDONLY|os.O_CREATE|os.O_TRUNC, 0660)
+		fd.Close()
+		return &defaultRes, false
+	}
+	if err != nil {
+		return &defaultRes, false
+	}
+	defer fd.Close()
+
+	var res MultiDownloadCPInfo
+	err = json.NewDecoder(fd).Decode(&res)
+	if err != nil {
+		return &defaultRes, false
+	}
+	// compare the checkpoint against the object on COS
+	if res.CRC64 != opt.CRC64 || res.ETag != opt.ETag || res.Size != opt.Size || res.LastModified != opt.LastModified || len(res.DownloadedBlocks) == 0 {
+		return &defaultRes, false
+	}
+	// len(chunks) is greater than 1 here (otherwise the simple download path is taken); chunks[0].Size equals partSize
+	partSize := chunks[0].Size
+	for _, v := range res.DownloadedBlocks {
+		index := v.From / partSize
+		to := chunks[index].OffSet + chunks[index].Size - 1
+		if chunks[index].OffSet != v.From || to != v.To {
+			// reset the chunks
+			for i := range chunks {
+				chunks[i].Done = false
+			}
+			return &defaultRes, false
+		}
+		chunks[index].Done = true
+	}
+	return &res, true
+}
+
+func (s *ObjectService) Download(ctx context.Context, name string, filepath string, opt *MultiDownloadOptions, id ...string) (*Response, error) {
+	// validate parameters
+	if opt == nil {
+		opt = &MultiDownloadOptions{}
+	}
+	if opt.Opt != nil && opt.Opt.Range != "" {
+		return nil, fmt.Errorf("Download doesn't support Range Options")
+	}
+	headOpt := &ObjectHeadOptions{}
+	if opt.Opt != nil {
+		headOpt.XCosSSECustomerAglo = opt.Opt.XCosSSECustomerAglo
+		headOpt.XCosSSECustomerKey = opt.Opt.XCosSSECustomerKey
+		headOpt.XCosSSECustomerKeyMD5 = opt.Opt.XCosSSECustomerKeyMD5
+		headOpt.XOptionHeader = opt.Opt.XOptionHeader
+	}
+	resp, err := s.Head(ctx, name, headOpt, id...)
+	if err != nil {
+		return resp, err
+	}
+	// fetch the object length and CRC
+	// if the object carries no x-cos-hash-crc64ecma header, the CRC check is skipped
+	coscrc := resp.Header.Get("x-cos-hash-crc64ecma")
+	strTotalBytes := resp.Header.Get("Content-Length")
+	totalBytes, err := strconv.ParseInt(strTotalBytes, 10, 64)
+	if err != nil {
+		return resp, err
+	}
+
+	// split into chunks
+	chunks, partNum, err := SplitSizeIntoChunks(totalBytes, opt.PartSize*1024*1024)
+	if err != nil {
+		return resp, err
+	}
+	// small object: download straight into the file
+	if partNum == 0 || partNum == 1 {
+		rsp, err := s.GetToFile(ctx, name, filepath, opt.Opt, id...)
+		if err != nil {
+			return rsp, err
+		}
+		if coscrc != "" && s.client.Conf.EnableCRC && !opt.DisableChecksum {
+			icoscrc, _ := strconv.ParseUint(coscrc, 10, 64)
+			fd, err := os.Open(filepath)
+			if err != nil {
+				return rsp, err
+			}
+			defer fd.Close()
+			localcrc, err := calCRC64(fd)
+			if err != nil {
+				return rsp, err
+			}
+			if localcrc != icoscrc {
+				return rsp, fmt.Errorf("verification failed, want:%v, return:%v", icoscrc, localcrc)
+			}
+		}
+		return rsp, err
+	}
+	// resume from a checkpoint
+	var resumableFlag bool
+	var resumableInfo *MultiDownloadCPInfo
+	var cpfd *os.File
+	var cpfile string
+	if opt.CheckPoint {
+		cpInfo := &MultiDownloadCPInfo{
+			LastModified: resp.Header.Get("Last-Modified"),
+			ETag:         resp.Header.Get("ETag"),
+			CRC64:        coscrc,
+			Size:         totalBytes,
+		}
+		cpfile = opt.CheckPointFile
+		if cpfile == "" {
+			cpfile = fmt.Sprintf("%s.cosresumabletask", filepath)
+		}
+		resumableInfo, resumableFlag = s.checkDownloadedParts(cpInfo, cpfile, chunks)
+		cpfd, err = os.OpenFile(cpfile, os.O_RDWR, 0660)
+		if err != nil {
+			return nil, fmt.Errorf("Open CheckPoint File[%v] Failed:%v", cpfile, err)
+		}
+	}
+	if !resumableFlag {
+		// create the target file
+		nfile, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660)
+		if err != nil {
+			if cpfd != nil {
+				cpfd.Close()
+			}
+			return resp, err
+		}
+		nfile.Close()
+	}
+
+	var poolSize int
+	if opt.ThreadPoolSize > 0 {
+		poolSize = opt.ThreadPoolSize
+	} else {
+		poolSize = 1
+	}
+	chjobs := make(chan *Jobs, 100)
+	chresults := make(chan *Results, 10000)
+	for w := 1; w <= poolSize; w++ {
+		go downloadWorker(ctx, s, chjobs, chresults)
+	}
+
+	go func() {
+		for _, chunk := range chunks {
+			if chunk.Done {
+				continue
+			}
+			var downOpt ObjectGetOptions
+			if opt.Opt != nil {
+				downOpt = *opt.Opt
+				downOpt.Listener = nil // the per-chunk listener must be nil
+			}
+			job := &Jobs{
+				Name:       name,
+				RetryTimes: 3,
+				FilePath:   filepath,
+				Chunk:      chunk,
+				DownOpt:    &downOpt,
+			}
+			if len(id) > 0 {
+				job.VersionId = append(job.VersionId, id...)
+			}
+			chjobs <- job
+		}
+		close(chjobs)
+	}()
+	err = nil
+	for i := 0; i < partNum; i++ {
+		if chunks[i].Done {
+			continue
+		}
+		res := <-chresults
+		if res.Resp == nil || res.err != nil {
+			err = fmt.Errorf("part %d failed to get resp content. error: %s", res.PartNumber, res.err.Error())
+			continue
+		}
+		// Dump CheckPoint Info
+		if opt.CheckPoint {
+			cpfd.Truncate(0)
+			cpfd.Seek(0, os.SEEK_SET)
+			resumableInfo.DownloadedBlocks = append(resumableInfo.DownloadedBlocks, DownloadedBlock{
+				From: chunks[res.PartNumber-1].OffSet,
+				To:   chunks[res.PartNumber-1].OffSet + chunks[res.PartNumber-1].Size - 1,
+			})
+			json.NewEncoder(cpfd).Encode(resumableInfo)
+		}
+	}
+	close(chresults)
+	if cpfd != nil {
+		cpfd.Close()
+	}
+	if err != nil {
+		return nil, err
+	}
+	// the download succeeded: remove the checkpoint file
+	if opt.CheckPoint {
+		os.Remove(cpfile)
+	}
+	if coscrc != "" && s.client.Conf.EnableCRC && !opt.DisableChecksum {
+		icoscrc, _ := strconv.ParseUint(coscrc, 10, 64)
+		fd, err := os.Open(filepath)
+		if err != nil {
+			return resp, err
+		}
+		defer fd.Close()
+		localcrc, err := calCRC64(fd)
+		if err != nil {
+			return resp, err
+		}
+		if localcrc != icoscrc {
+			return resp, fmt.Errorf("verification failed, want:%v, return:%v", icoscrc, localcrc)
+		}
+	}
+	return resp, err
+}
+
+type ObjectPutTaggingOptions struct {
+	XMLName       xml.Name           `xml:"Tagging" header:"-"`
+	TagSet        []ObjectTaggingTag `xml:"TagSet>Tag,omitempty" header:"-"`
+	XOptionHeader *http.Header       `header:"-,omitempty" url:"-" xml:"-"`
+}
+type ObjectTaggingTag BucketTaggingTag
+type ObjectGetTaggingResult ObjectPutTaggingOptions
+
+func (s *ObjectService) PutTagging(ctx context.Context, name string, opt *ObjectPutTaggingOptions, id ...string) (*Response, error) {
+	var u string
+	if len(id) == 1 {
+		u = fmt.Sprintf("/%s?tagging&versionId=%s", encodeURIComponent(name), id[0])
+	} else if len(id) == 0 {
+		u = fmt.Sprintf("/%s?tagging", encodeURIComponent(name))
+	} else {
+		return nil, errors.New("wrong params")
+	}
+	sendOpt := &sendOptions{
+		baseURL:   s.client.BaseURL.BucketURL,
+		uri:       u,
+		method:    http.MethodPut,
+		body:      opt,
+		optHeader: opt,
+	}
+	resp, err := s.client.doRetry(ctx, sendOpt)
+	return resp, err
+}
+
+type ObjectGetTaggingOptions struct {
+	XOptionHeader *http.Header `header:"-,omitempty" url:"-" xml:"-"`
+}
+
+func (s *ObjectService) GetTagging(ctx context.Context, name string, opt ...interface{}) (*ObjectGetTaggingResult, *Response, error) {
+	var optHeader *ObjectGetTaggingOptions
+	u := fmt.Sprintf("/%s?tagging", encodeURIComponent(name))
+	if len(opt) > 2 {
+		return nil, nil, errors.New("wrong params")
+	}
+	for _, val := range opt {
+		if v, ok := val.(string); ok {
+			u = fmt.Sprintf("%s&versionId=%s", u, v)
+		}
+		if v, ok := val.(*ObjectGetTaggingOptions); ok {
+			optHeader = v
+		}
+	}
+
+	var res ObjectGetTaggingResult
+	sendOpt := &sendOptions{
+		baseURL:   s.client.BaseURL.BucketURL,
+		uri:       u,
+		method:    http.MethodGet,
+		optHeader: optHeader,
+		result:    &res,
+	}
+	resp, err := s.client.doRetry(ctx, sendOpt)
+	return &res, resp, err
+}
+
+func (s *ObjectService) DeleteTagging(ctx context.Context, name string, opt ...interface{}) (*Response, error) {
+	if len(name) == 0 || name == "/" {
+		return nil, errors.New("empty object name")
+	}
+	var optHeader *ObjectGetTaggingOptions
+	u := fmt.Sprintf("/%s?tagging", encodeURIComponent(name))
+	if len(opt) > 2 {
+		return nil, errors.New("wrong params")
+	}
+	for _, val := range opt {
+		if v, ok := val.(string); ok {
+			u = fmt.Sprintf("%s&versionId=%s", u, v)
+		}
+		if v, ok := val.(*ObjectGetTaggingOptions); ok {
+			optHeader = v
+		}
+	}
+
+	sendOpt := &sendOptions{
+		baseURL:   s.client.BaseURL.BucketURL,
+		uri:       u,
+		method:    http.MethodDelete,
+		optHeader: optHeader,
+	}
+	resp, err := s.client.doRetry(ctx, sendOpt)
+	return resp, err
+}
+
+type PutFetchTaskOptions struct { + Url string `json:"Url,omitempty" header:"-" xml:"-"` + Key string `json:"Key,omitempty" header:"-" xml:"-"` + MD5 string `json:"MD5,omitempty" header:"-" xml:"-"` + OnKeyExist string `json:"OnKeyExist,omitempty" header:"-" xml:"-"` + IgnoreSameKey bool `json:"IgnoreSameKey,omitempty" header:"-" xml:"-"` + SuccessCallbackUrl string `json:"SuccessCallbackUrl,omitempty" header:"-" xml:"-"` + FailureCallbackUrl string `json:"FailureCallbackUrl,omitempty" header:"-" xml:"-"` + XOptionHeader *http.Header `json:"-", xml:"-" header:"-,omitempty"` +} + +type PutFetchTaskResult struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` + RequestId string `json:"request_id,omitempty"` + Data struct { + TaskId string `json:"taskId,omitempty"` + } `json:"Data,omitempty"` +} + +type GetFetchTaskResult struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` + RequestId string `json:"request_id,omitempty"` + Data struct { + Code string `json:"code,omitempty"` + Message string `json:"msg,omitempty"` + Percent int `json:"percent,omitempty"` + Status string `json:"status,omitempty"` + } `json:"data,omitempty"` +} + +type innerFetchTaskHeader struct { + XOptionHeader *http.Header `json:"-", xml:"-" header:"-,omitempty"` +} + +func (s *ObjectService) PutFetchTask(ctx context.Context, bucket string, opt *PutFetchTaskOptions) (*PutFetchTaskResult, *Response, error) { + var buf bytes.Buffer + var res PutFetchTaskResult + if opt == nil { + opt = &PutFetchTaskOptions{} + } + header := innerFetchTaskHeader{ + XOptionHeader: &http.Header{}, + } + if opt.XOptionHeader != nil { + header.XOptionHeader = cloneHeader(opt.XOptionHeader) + } + header.XOptionHeader.Set("Content-Type", "application/json") + bs, err := json.Marshal(opt) + if err != nil { + return nil, nil, err + } + reader := bytes.NewBuffer(bs) + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.FetchURL, + uri: fmt.Sprintf("/%s/", bucket), + method: http.MethodPost, + optHeader: &header, + body: reader, + result: &buf, + } + resp, err := s.client.send(ctx, sendOpt) + if buf.Len() > 0 { + err = json.Unmarshal(buf.Bytes(), &res) + } + return &res, resp, err +} + +func (s *ObjectService) GetFetchTask(ctx context.Context, bucket string, taskid string) (*GetFetchTaskResult, *Response, error) { + var buf bytes.Buffer + var res GetFetchTaskResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.FetchURL, + uri: fmt.Sprintf("/%s/%s", bucket, encodeURIComponent(taskid)), + method: http.MethodGet, + result: &buf, + } + resp, err := s.client.send(ctx, sendOpt) + if buf.Len() > 0 { + err = json.Unmarshal(buf.Bytes(), &res) + } + return &res, resp, err +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/object_acl.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_acl.go new file mode 100644 index 0000000..00cb690 --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_acl.go @@ -0,0 +1,66 @@ +package cos + +import ( + "context" + "net/http" +) + +// ObjectGetACLResult is the result of GetObjectACL +type ObjectGetACLResult = ACLXml + +// GetACL Get Object ACL接口实现使用API读取Object的ACL表,只有所有者有权操作。 +// +// https://www.qcloud.com/document/product/436/7744 +func (s *ObjectService) GetACL(ctx context.Context, name string) (*ObjectGetACLResult, *Response, error) { + var res ObjectGetACLResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name) + "?acl", + method: http.MethodGet, + result: &res, + } + 
resp, err := s.client.doRetry(ctx, &sendOpt) + if err == nil { + decodeACL(resp, &res) + } + return &res, resp, err +} + +// ObjectPutACLOptions the options of put object acl +type ObjectPutACLOptions struct { + Header *ACLHeaderOptions `url:"-" xml:"-"` + Body *ACLXml `url:"-" header:"-"` +} + +// PutACL 使用API写入Object的ACL表,您可以通过Header:"x-cos-acl", "x-cos-grant-read" , +// "x-cos-grant-write" ,"x-cos-grant-full-control"传入ACL信息, +// 也可以通过body以XML格式传入ACL信息,但是只能选择Header和Body其中一种,否则,返回冲突。 +// +// Put Object ACL是一个覆盖操作,传入新的ACL将覆盖原有ACL。只有所有者有权操作。 +// +// "x-cos-acl":枚举值为public-read,private;public-read意味这个Object有公有读私有写的权限, +// private意味这个Object有私有读写的权限。 +// +// "x-cos-grant-read":意味被赋予权限的用户拥有该Object的读权限 +// +// "x-cos-grant-write":意味被赋予权限的用户拥有该Object的写权限 +// +// "x-cos-grant-full-control":意味被赋予权限的用户拥有该Object的读写权限 +// +// https://www.qcloud.com/document/product/436/7748 +func (s *ObjectService) PutACL(ctx context.Context, name string, opt *ObjectPutACLOptions) (*Response, error) { + header := opt.Header + body := opt.Body + if body != nil { + header = nil + } + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name) + "?acl", + method: http.MethodPut, + optHeader: header, + body: body, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + return resp, err +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/object_part.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_part.go new file mode 100644 index 0000000..0bcf3eb --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_part.go @@ -0,0 +1,521 @@ +package cos + +import ( + "context" + "encoding/xml" + "errors" + "fmt" + "hash/crc64" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// InitiateMultipartUploadOptions is the option of InitateMultipartUpload +type InitiateMultipartUploadOptions struct { + *ACLHeaderOptions + *ObjectPutHeaderOptions +} + +// InitiateMultipartUploadResult is the result of InitateMultipartUpload +type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +// InitiateMultipartUpload 请求实现初始化分片上传,成功执行此请求以后会返回Upload ID用于后续的Upload Part请求。 +// +// https://www.qcloud.com/document/product/436/7746 +func (s *ObjectService) InitiateMultipartUpload(ctx context.Context, name string, opt *InitiateMultipartUploadOptions) (*InitiateMultipartUploadResult, *Response, error) { + var res InitiateMultipartUploadResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/" + encodeURIComponent(name) + "?uploads", + method: http.MethodPost, + optHeader: opt, + result: &res, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + return &res, resp, err +} + +// ObjectUploadPartOptions is the options of upload-part +type ObjectUploadPartOptions struct { + Expect string `header:"Expect,omitempty" url:"-"` + XCosContentSHA1 string `header:"x-cos-content-sha1,omitempty" url:"-"` + ContentLength int64 `header:"Content-Length,omitempty" url:"-"` + ContentMD5 string `header:"Content-MD5,omitempty" url:"-"` + XCosSSECustomerAglo string `header:"x-cos-server-side-encryption-customer-algorithm,omitempty" url:"-" xml:"-"` + XCosSSECustomerKey string `header:"x-cos-server-side-encryption-customer-key,omitempty" url:"-" xml:"-"` + XCosSSECustomerKeyMD5 string `header:"x-cos-server-side-encryption-customer-key-MD5,omitempty" url:"-" xml:"-"` + + XCosTrafficLimit int `header:"x-cos-traffic-limit,omitempty" url:"-" xml:"-"` + + XOptionHeader 
*http.Header `header:"-,omitempty" url:"-" xml:"-"` + // 上传进度, ProgressCompleteEvent不能表示对应API调用成功,API是否调用成功的判断标准为返回err==nil + Listener ProgressListener `header:"-" url:"-" xml:"-"` +} + +// UploadPart 请求实现在初始化以后的分块上传,支持的块的数量为1到10000,块的大小为1 MB 到5 GB。 +// 在每次请求Upload Part时候,需要携带partNumber和uploadID,partNumber为块的编号,支持乱序上传。 +// +// 当传入uploadID和partNumber都相同的时候,后传入的块将覆盖之前传入的块。当uploadID不存在时会返回404错误,NoSuchUpload. +// +// 当 r 不是 bytes.Buffer/bytes.Reader/strings.Reader 时,必须指定 opt.ContentLength +// +// https://www.qcloud.com/document/product/436/7750 +func (s *ObjectService) UploadPart(ctx context.Context, name, uploadID string, partNumber int, r io.Reader, uopt *ObjectUploadPartOptions) (*Response, error) { + if r == nil { + return nil, fmt.Errorf("reader is nil") + } + if err := CheckReaderLen(r); err != nil { + return nil, err + } + // opt 不为 nil + opt := CloneObjectUploadPartOptions(uopt) + totalBytes, err := GetReaderLen(r) + if err != nil && opt.Listener != nil { + if opt.ContentLength == 0 { + return nil, err + } + totalBytes = opt.ContentLength + } + // 分块上传不支持 Chunk 上传 + if err == nil { + // 与 go http 保持一致, 非bytes.Buffer/bytes.Reader/strings.Reader需用户指定ContentLength + if opt != nil && opt.ContentLength == 0 && IsLenReader(r) { + opt.ContentLength = totalBytes + } + } + reader := TeeReader(r, nil, totalBytes, nil) + if s.client.Conf.EnableCRC { + reader.writer = crc64.New(crc64.MakeTable(crc64.ECMA)) + } + if opt != nil && opt.Listener != nil { + reader.listener = opt.Listener + } + u := fmt.Sprintf("/%s?partNumber=%d&uploadId=%s", encodeURIComponent(name), partNumber, uploadID) + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodPut, + optHeader: opt, + body: reader, + } + resp, err := s.client.send(ctx, &sendOpt) + return resp, err +} + +// ObjectListPartsOptions is the option of ListParts +type ObjectListPartsOptions struct { + EncodingType string `url:"Encoding-type,omitempty"` + MaxParts string `url:"max-parts,omitempty"` + PartNumberMarker string `url:"part-number-marker,omitempty"` +} + +// ObjectListPartsResult is the result of ListParts +type ObjectListPartsResult struct { + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string + EncodingType string `xml:"Encoding-type,omitempty"` + Key string + UploadID string `xml:"UploadId"` + Initiator *Initiator `xml:"Initiator,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` + StorageClass string + PartNumberMarker string + NextPartNumberMarker string `xml:"NextPartNumberMarker,omitempty"` + MaxParts string + IsTruncated bool + Parts []Object `xml:"Part,omitempty"` +} + +// ListParts 用来查询特定分块上传中的已上传的块。 +// +// https://www.qcloud.com/document/product/436/7747 +func (s *ObjectService) ListParts(ctx context.Context, name, uploadID string, opt *ObjectListPartsOptions) (*ObjectListPartsResult, *Response, error) { + u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID) + var res ObjectListPartsResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodGet, + result: &res, + optQuery: opt, + } + resp, err := s.client.doRetry(ctx, &sendOpt) + return &res, resp, err +} + +// CompleteMultipartUploadOptions is the option of CompleteMultipartUpload +type CompleteMultipartUploadOptions struct { + XMLName xml.Name `xml:"CompleteMultipartUpload" header:"-" url:"-"` + Parts []Object `xml:"Part" header:"-" url:"-"` + XOptionHeader *http.Header `header:"-,omitempty" xml:"-" url:"-"` +} + +// CompleteMultipartUploadResult is the result 
of CompleteMultipartUpload
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// ObjectList is used to sort the parts that CompleteMultipartUpload requires, for example:
+// sort.Sort(cos.ObjectList(opt.Parts))
+type ObjectList []Object
+
+func (o ObjectList) Len() int {
+	return len(o)
+}
+
+func (o ObjectList) Swap(i, j int) {
+	o[i], o[j] = o[j], o[i]
+}
+
+func (o ObjectList) Less(i, j int) bool { // order parts by ascending PartNumber
+	return o[i].PartNumber < o[j].PartNumber
+}
+
+// CompleteMultipartUpload completes the whole multipart upload. Once all parts have been uploaded with Upload Parts, use this API to finish the upload.
+// The request body must carry every part's PartNumber and ETag, which are used to verify the parts.
+//
+// Because merging the parts takes several minutes, COS returns a 200 status code as soon as the merge starts, and during the merge
+// it periodically sends whitespace to keep the connection alive; once the merge finishes, the merged result is returned in the body.
+//
+// Uploading a part smaller than 1 MB makes this request return 400 EntityTooSmall;
+// non-contiguous part numbers make it return 400 InvalidPart;
+// parts not listed in ascending PartNumber order in the body make it return 400 InvalidPartOrder;
+// a nonexistent UploadId makes it return 404 NoSuchUpload.
+//
+// Complete or abort multipart uploads promptly: parts that were uploaded but never finalized occupy storage and incur storage costs.
+//
+// https://www.qcloud.com/document/product/436/7742
+func (s *ObjectService) CompleteMultipartUpload(ctx context.Context, name, uploadID string, opt *CompleteMultipartUploadOptions) (*CompleteMultipartUploadResult, *Response, error) {
+	u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID)
+	var res CompleteMultipartUploadResult
+	sendOpt := sendOptions{
+		baseURL:   s.client.BaseURL.BucketURL,
+		uri:       u,
+		method:    http.MethodPost,
+		optHeader: opt,
+		body:      opt,
+		result:    &res,
+	}
+	resp, err := s.client.doRetry(ctx, &sendOpt)
+	// If an error occurs while the parts are merged, the error response is embedded in the 200 OK response, so a 200 OK response can carry either a success or an error.
+	if err == nil && resp.StatusCode == 200 {
+		if res.ETag == "" {
+			return &res, resp, errors.New("response 200 OK, but body contains an error")
+		}
+	}
+	return &res, resp, err
+}
+
+// AbortMultipartUpload aborts a multipart upload and deletes the parts uploaded so far. When Abort Multipart Upload is called
+// while Upload Parts requests for this upload are still in flight, those Upload Parts requests fail. A nonexistent UploadID returns 404 NoSuchUpload.
+//
+// Complete or abort multipart uploads promptly: parts that were uploaded but never finalized occupy storage and incur storage costs.
+//
+// https://www.qcloud.com/document/product/436/7740
+func (s *ObjectService) AbortMultipartUpload(ctx context.Context, name, uploadID string) (*Response, error) {
+	if len(name) == 0 || name == "/" {
+		return nil, errors.New("empty object name")
+	}
+	u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID)
+	sendOpt := sendOptions{
+		baseURL: s.client.BaseURL.BucketURL,
+		uri:     u,
+		method:  http.MethodDelete,
+	}
+	resp, err := s.client.doRetry(ctx, &sendOpt)
+	return resp, err
+}
+
+// ObjectCopyPartOptions is the options of copy-part
+type ObjectCopyPartOptions struct {
+	XCosCopySource                  string `header:"x-cos-copy-source" url:"-"`
+	XCosCopySourceRange             string `header:"x-cos-copy-source-range,omitempty" url:"-"`
+	XCosCopySourceIfModifiedSince   string `header:"x-cos-copy-source-If-Modified-Since,omitempty" url:"-"`
+	XCosCopySourceIfUnmodifiedSince string `header:"x-cos-copy-source-If-Unmodified-Since,omitempty" url:"-"`
+	XCosCopySourceIfMatch           string `header:"x-cos-copy-source-If-Match,omitempty" url:"-"`
+	XCosCopySourceIfNoneMatch       string `header:"x-cos-copy-source-If-None-Match,omitempty" url:"-"`
+}
+
+// CopyPartResult is the result of CopyPart
+type CopyPartResult struct {
+	XMLName      xml.Name `xml:"CopyPartResult"`
+	ETag         string
+	LastModified string
+}
+
+// CopyPart copies a part into a multipart upload after it has been initiated; 1 to 10000 parts are supported, each 1 MB to 5 GB in size.
+// Every request carries partNumber and uploadID; partNumber is the index of the part, and out-of-order uploads are supported.
+// ObjectCopyPartOptions.XCosCopySource is required, in the form -.cos..myqcloud.com/
+// ObjectCopyPartOptions.XCosCopySourceRange specifies the source range, in the form bytes=-
+//
+// When the same uploadID and partNumber are passed in twice, the later part overwrites the earlier one. A nonexistent uploadID returns a 404 error, NoSuchUpload.
+//
+// https://www.qcloud.com/document/product/436/7750
+func (s *ObjectService) CopyPart(ctx context.Context, name, uploadID string, partNumber int, sourceURL string, opt *ObjectCopyPartOptions) (*CopyPartResult, *Response, error) {
+	if opt == nil {
+		opt = &ObjectCopyPartOptions{}
+	}
+	opt.XCosCopySource = sourceURL
+	u := fmt.Sprintf("/%s?partNumber=%d&uploadId=%s", encodeURIComponent(name), partNumber, uploadID)
+	var res CopyPartResult
+	sendOpt := sendOptions{
+		baseURL:   s.client.BaseURL.BucketURL,
+		uri:       u,
+		method:    http.MethodPut,
+		optHeader: opt,
+		result:    &res,
+	}
+	resp, err := s.client.send(ctx, &sendOpt)
+	// If an error occurs during the copy operation, the error response is embedded in the 200 OK response, so a 200 OK response can carry either a success or an error.
+ if err == nil && resp != nil && resp.StatusCode == 200 { + if res.ETag == "" { + return &res, resp, errors.New("response 200 OK, but body contains an error") + } + } + return &res, resp, err +} + +type ObjectListUploadsOptions struct { + Delimiter string `url:"delimiter,omitempty"` + EncodingType string `url:"encoding-type,omitempty"` + Prefix string `url:"prefix,omitempty"` + MaxUploads int `url:"max-uploads,omitempty"` + KeyMarker string `url:"key-marker,omitempty"` + UploadIdMarker string `url:"upload-id-marker,omitempty"` +} + +type ObjectListUploadsResult struct { + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket,omitempty"` + EncodingType string `xml:"Encoding-Type,omitempty"` + KeyMarker string `xml:"KeyMarker,omitempty"` + UploadIdMarker string `xml:"UploadIdMarker,omitempty"` + NextKeyMarker string `xml:"NextKeyMarker,omitempty"` + NextUploadIdMarker string `xml:"NextUploadIdMarker,omitempty"` + MaxUploads string `xml:"MaxUploads,omitempty"` + IsTruncated bool `xml:"IsTruncated,omitempty"` + Prefix string `xml:"Prefix,omitempty"` + Delimiter string `xml:"Delimiter,omitempty"` + Upload []ListUploadsResultUpload `xml:"Upload,omitempty"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix,omitempty"` +} + +type ListUploadsResultUpload struct { + Key string `xml:"Key,omitempty"` + UploadID string `xml:"UploadId,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + Initiator *Initiator `xml:"Initiator,omitempty"` + Owner *Owner `xml:"Owner,omitempty"` + Initiated string `xml:"Initiated,omitempty"` +} + +func (s *ObjectService) ListUploads(ctx context.Context, opt *ObjectListUploadsOptions) (*ObjectListUploadsResult, *Response, error) { + var res ObjectListUploadsResult + sendOpt := &sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: "/?uploads", + method: http.MethodGet, + optQuery: opt, + result: &res, + } + resp, err := s.client.doRetry(ctx, sendOpt) + return &res, resp, err +} + +type MultiCopyOptions struct { + OptCopy *ObjectCopyOptions + PartSize int64 + ThreadPoolSize int + useMulti bool // use for ut +} + +type CopyJobs struct { + Name string + UploadId string + RetryTimes int + Chunk Chunk + Opt *ObjectCopyPartOptions +} + +type CopyResults struct { + PartNumber int + Resp *Response + err error + res *CopyPartResult +} + +func copyworker(ctx context.Context, s *ObjectService, jobs <-chan *CopyJobs, results chan<- *CopyResults) { + for j := range jobs { + var copyres CopyResults + j.Opt.XCosCopySourceRange = fmt.Sprintf("bytes=%d-%d", j.Chunk.OffSet, j.Chunk.OffSet+j.Chunk.Size-1) + rt := j.RetryTimes + for { + res, resp, err := s.CopyPart(ctx, j.Name, j.UploadId, j.Chunk.Number, j.Opt.XCosCopySource, j.Opt) + copyres.PartNumber = j.Chunk.Number + copyres.Resp = resp + copyres.err = err + copyres.res = res + if err != nil { + rt-- + if rt == 0 { + results <- ©res + break + } + if resp != nil && resp.StatusCode < 499 { + results <- ©res + break + } + time.Sleep(10 * time.Millisecond) + continue + } + results <- ©res + break + } + } +} + +func (s *ObjectService) innerHead(ctx context.Context, sourceURL string, opt *ObjectHeadOptions, id []string) (resp *Response, err error) { + surl := strings.SplitN(sourceURL, "/", 2) + if len(surl) < 2 { + err = errors.New(fmt.Sprintf("sourceURL format error: %s", sourceURL)) + return + } + + u, err := url.Parse(fmt.Sprintf("http://%s", surl[0])) + if err != nil { + return + } + b := &BaseURL{BucketURL: u} + client := NewClient(b, &http.Client{ + Transport: s.client.client.Transport, + 
}) + if len(id) > 0 { + resp, err = client.Object.Head(ctx, surl[1], nil, id[0]) + } else { + resp, err = client.Object.Head(ctx, surl[1], nil) + } + return +} + +// 如果源对象大于5G,则采用分块复制的方式进行拷贝,此时源对象的元信息如果COPY +func (s *ObjectService) MultiCopy(ctx context.Context, name string, sourceURL string, opt *MultiCopyOptions, id ...string) (*ObjectCopyResult, *Response, error) { + resp, err := s.innerHead(ctx, sourceURL, nil, id) + if err != nil { + return nil, nil, err + } + totalBytes := resp.ContentLength + surl := strings.SplitN(sourceURL, "/", 2) + if len(surl) < 2 { + return nil, nil, errors.New(fmt.Sprintf("x-cos-copy-source format error: %s", sourceURL)) + } + var u string + if len(id) == 1 { + u = fmt.Sprintf("%s/%s?versionId=%s", surl[0], encodeURIComponent(surl[1]), id[0]) + } else if len(id) == 0 { + u = fmt.Sprintf("%s/%s", surl[0], encodeURIComponent(surl[1])) + } else { + return nil, nil, errors.New("wrong params") + } + + if opt == nil { + opt = &MultiCopyOptions{} + } + chunks, partNum, err := SplitSizeIntoChunks(totalBytes, opt.PartSize*1024*1024) + if err != nil { + return nil, nil, err + } + if partNum == 0 || (totalBytes < singleUploadMaxLength && !opt.useMulti) { + if len(id) > 0 { + return s.Copy(ctx, name, sourceURL, opt.OptCopy, id[0]) + } else { + return s.Copy(ctx, name, sourceURL, opt.OptCopy) + } + } + optini := CopyOptionsToMulti(opt.OptCopy) + var uploadID string + res, _, err := s.InitiateMultipartUpload(ctx, name, optini) + if err != nil { + return nil, nil, err + } + uploadID = res.UploadID + + var poolSize int + if opt.ThreadPoolSize > 0 { + poolSize = opt.ThreadPoolSize + } else { + poolSize = 1 + } + + chjobs := make(chan *CopyJobs, 100) + chresults := make(chan *CopyResults, 10000) + optcom := &CompleteMultipartUploadOptions{} + + for w := 1; w <= poolSize; w++ { + go copyworker(ctx, s, chjobs, chresults) + } + + go func() { + for _, chunk := range chunks { + partOpt := &ObjectCopyPartOptions{ + XCosCopySource: u, + } + if opt.OptCopy != nil && opt.OptCopy.ObjectCopyHeaderOptions != nil { + partOpt.XCosCopySourceIfModifiedSince = opt.OptCopy.XCosCopySourceIfModifiedSince + partOpt.XCosCopySourceIfUnmodifiedSince = opt.OptCopy.XCosCopySourceIfUnmodifiedSince + partOpt.XCosCopySourceIfMatch = opt.OptCopy.XCosCopySourceIfMatch + partOpt.XCosCopySourceIfNoneMatch = opt.OptCopy.XCosCopySourceIfNoneMatch + } + job := &CopyJobs{ + Name: name, + RetryTimes: 3, + UploadId: uploadID, + Chunk: chunk, + Opt: partOpt, + } + chjobs <- job + } + close(chjobs) + }() + err = nil + for i := 0; i < partNum; i++ { + res := <-chresults + if res.res == nil || res.err != nil { + err = fmt.Errorf("UploadID %s, part %d failed to get resp content. 
error: %s", uploadID, res.PartNumber, res.err.Error()) + continue + } + etag := res.res.ETag + optcom.Parts = append(optcom.Parts, Object{ + PartNumber: res.PartNumber, ETag: etag}, + ) + } + close(chresults) + if err != nil { + return nil, nil, err + } + sort.Sort(ObjectList(optcom.Parts)) + + v, resp, err := s.CompleteMultipartUpload(ctx, name, uploadID, optcom) + if err != nil { + s.AbortMultipartUpload(ctx, name, uploadID) + return nil, resp, err + } + cpres := &ObjectCopyResult{ + ETag: v.ETag, + CRC64: resp.Header.Get("x-cos-hash-crc64ecma"), + VersionId: resp.Header.Get("x-cos-version-id"), + } + return cpres, resp, err +} diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/object_select.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_select.go new file mode 100644 index 0000000..64a4e2f --- /dev/null +++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/object_select.go @@ -0,0 +1,445 @@ +package cos + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "os" + "time" +) + +type JSONInputSerialization struct { + Type string `xml:"Type,omitempty"` +} + +type CSVInputSerialization struct { + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` + QuoteEscapeCharacter string `xml:"QuoteEscapeCharacter,omitempty"` + AllowQuotedRecordDelimiter string `xml:"AllowQuotedRecordDelimiter,omitempty"` + FileHeaderInfo string `xml:"FileHeaderInfo,omitempty"` + Comments string `xml:"Comments,omitempty"` +} + +type SelectInputSerialization struct { + CompressionType string `xml:"CompressionType,omitempty"` + CSV *CSVInputSerialization `xml:"CSV,omitempty"` + JSON *JSONInputSerialization `xml:"JSON,omitempty"` +} + +type JSONOutputSerialization struct { + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` +} + +type CSVOutputSerialization struct { + QuoteFields string `xml:"QuoteFields,omitempty"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` + QuoteEscapeCharacter string `xml:"QuoteEscapeCharacter,omitempty"` +} + +type SelectOutputSerialization struct { + CSV *CSVOutputSerialization `xml:"CSV,omitempty"` + JSON *JSONOutputSerialization `xml:"JSON,omitempty"` +} + +type ObjectSelectOptions struct { + XMLName xml.Name `xml:"SelectRequest"` + Expression string `xml:"Expression"` + ExpressionType string `xml:"ExpressionType"` + InputSerialization *SelectInputSerialization `xml:"InputSerialization"` + OutputSerialization *SelectOutputSerialization `xml:"OutputSerialization"` + RequestProgress string `xml:"RequestProgress>Enabled,omitempty"` +} + +func (s *ObjectService) Select(ctx context.Context, name string, opt *ObjectSelectOptions) (io.ReadCloser, error) { + u := fmt.Sprintf("/%s?select&select-type=2", encodeURIComponent(name)) + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.BucketURL, + uri: u, + method: http.MethodPost, + body: opt, + disableCloseBody: true, + } + resp, err := s.client.send(ctx, &sendOpt) + if err != nil { + return nil, err + } + result := &ObjectSelectResponse{ + Headers: resp.Header, + Body: resp.Body, + StatusCode: resp.StatusCode, + Frame: &ObjectSelectResult{ + NextFrame: true, + Payload: []byte{}, + }, + Finish: false, + } + + return result, nil +} + +func (s *ObjectService) SelectToFile(ctx context.Context, name, file string, opt 
*ObjectSelectOptions) (*ObjectSelectResponse, error) { + resp, err := s.Select(ctx, name, opt) + if err != nil { + return nil, err + } + res, _ := resp.(*ObjectSelectResponse) + defer func() { + io.Copy(ioutil.Discard, resp) + resp.Close() + }() + + fd, err := os.OpenFile(file, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(0664)) + if err != nil { + return res, err + } + + _, err = io.Copy(fd, resp) + fd.Close() + res.Finish = true + return res, err +} + +const ( + kReadTimeout = 3 + kMessageType = ":message-type" + kEventType = ":event-type" + kContentType = ":content-type" + + kRecordsFrameType = iota + kContinuationFrameType + kProgressFrameType + kStatsFrameType + kEndFrameType + kErrorFrameType +) + +type ProgressFrame struct { + XMLName xml.Name `xml:"Progress"` + BytesScanned int `xml:"BytesScanned"` + BytesProcessed int `xml:"BytesProcessed"` + BytesReturned int `xml:"BytesReturned"` +} + +type StatsFrame struct { + XMLName xml.Name `xml:"Stats"` + BytesScanned int `xml:"BytesScanned"` + BytesProcessed int `xml:"BytesProcessed"` + BytesReturned int `xml:"BytesReturned"` +} + +type DataFrame struct { + ContentType string + ConsumedBytesLength int32 + LeftBytesLength int32 +} + +type ErrorFrame struct { + Code string + Message string +} + +func (e *ErrorFrame) Error() string { + return fmt.Sprintf("Error Code: %s, Error Message: %s", e.Code, e.Message) +} + +type ObjectSelectResult struct { + TotalFrameLength int32 + TotalHeaderLength int32 + NextFrame bool + FrameType int + Payload []byte + DataFrame DataFrame + ProgressFrame ProgressFrame + StatsFrame StatsFrame + ErrorFrame *ErrorFrame +} + +type ObjectSelectResponse struct { + StatusCode int + Headers http.Header + Body io.ReadCloser + Frame *ObjectSelectResult + Finish bool +} + +func (osr *ObjectSelectResponse) Read(p []byte) (n int, err error) { + n, err = osr.readFrames(p) + return +} +func (osr *ObjectSelectResponse) Close() error { + return osr.Body.Close() +} + +func (osr *ObjectSelectResponse) readFrames(p []byte) (int, error) { + if osr.Finish { + return 0, io.EOF + } + if osr.Frame.ErrorFrame != nil { + return 0, osr.Frame.ErrorFrame + } + + var err error + var nlen int + dlen := len(p) + + for nlen < dlen { + if osr.Frame.NextFrame == true { + osr.Frame.NextFrame = false + err := osr.analysisPrelude() + if err != nil { + return nlen, err + } + err = osr.analysisHeader() + if err != nil { + return nlen, err + } + } + switch osr.Frame.FrameType { + case kRecordsFrameType: + n, err := osr.analysisRecords(p[nlen:]) + if err != nil { + return nlen, err + } + nlen += n + case kContinuationFrameType: + err = osr.payloadChecksum("ContinuationFrame") + if err != nil { + return nlen, err + } + case kProgressFrameType: + err := osr.analysisXml(&osr.Frame.ProgressFrame) + if err != nil { + return nlen, err + } + case kStatsFrameType: + err := osr.analysisXml(&osr.Frame.StatsFrame) + if err != nil { + return nlen, err + } + case kEndFrameType: + err = osr.payloadChecksum("EndFrame") + if err != nil { + return nlen, err + } + osr.Finish = true + return nlen, io.EOF + case kErrorFrameType: + return nlen, osr.Frame.ErrorFrame + } + } + return nlen, err +} + +func (osr *ObjectSelectResponse) analysisPrelude() error { + frame := make([]byte, 12) + _, err := osr.fixedLengthRead(frame, kReadTimeout) + if err != nil { + return err + } + + var preludeCRC uint32 + bytesToInt(frame[0:4], &osr.Frame.TotalFrameLength) + bytesToInt(frame[4:8], &osr.Frame.TotalHeaderLength) + bytesToInt(frame[8:12], &preludeCRC) + osr.Frame.Payload = 
append(osr.Frame.Payload, frame...) + + return checksum(frame[0:8], preludeCRC, "Prelude") +} + +func (osr *ObjectSelectResponse) analysisHeader() error { + var nlen int32 + headers := make(map[string]string) + for nlen < osr.Frame.TotalHeaderLength { + var headerNameLen int8 + var headerValueLen int16 + bHeaderNameLen := make([]byte, 1) + _, err := osr.fixedLengthRead(bHeaderNameLen, kReadTimeout) + if err != nil { + return err + } + nlen += 1 + bytesToInt(bHeaderNameLen, &headerNameLen) + osr.Frame.Payload = append(osr.Frame.Payload, bHeaderNameLen...) + + bHeaderName := make([]byte, headerNameLen) + _, err = osr.fixedLengthRead(bHeaderName, kReadTimeout) + if err != nil { + return err + } + nlen += int32(headerNameLen) + headerName := string(bHeaderName) + osr.Frame.Payload = append(osr.Frame.Payload, bHeaderName...) + + bValueTypeLen := make([]byte, 3) + _, err = osr.fixedLengthRead(bValueTypeLen, kReadTimeout) + if err != nil { + return err + } + nlen += 3 + bytesToInt(bValueTypeLen[1:], &headerValueLen) + osr.Frame.Payload = append(osr.Frame.Payload, bValueTypeLen...) + + bHeaderValue := make([]byte, headerValueLen) + _, err = osr.fixedLengthRead(bHeaderValue, kReadTimeout) + if err != nil { + return err + } + nlen += int32(headerValueLen) + headers[headerName] = string(bHeaderValue) + osr.Frame.Payload = append(osr.Frame.Payload, bHeaderValue...) + } + htype, ok := headers[kMessageType] + if !ok { + return fmt.Errorf("header parse failed, no message-type, headers: %+v\n", headers) + } + switch { + case htype == "error": + osr.Frame.FrameType = kErrorFrameType + osr.Frame.ErrorFrame = &ErrorFrame{} + osr.Frame.ErrorFrame.Code, _ = headers[":error-code"] + osr.Frame.ErrorFrame.Message, _ = headers[":error-message"] + case htype == "event": + hevent, ok := headers[kEventType] + if !ok { + return fmt.Errorf("header parse failed, no event-type, headers: %+v\n", headers) + } + switch { + case hevent == "Records": + hContentType, ok := headers[kContentType] + if ok { + osr.Frame.DataFrame.ContentType = hContentType + } + osr.Frame.FrameType = kRecordsFrameType + case hevent == "Cont": + osr.Frame.FrameType = kContinuationFrameType + case hevent == "Progress": + osr.Frame.FrameType = kProgressFrameType + case hevent == "Stats": + osr.Frame.FrameType = kStatsFrameType + case hevent == "End": + osr.Frame.FrameType = kEndFrameType + default: + return fmt.Errorf("header parse failed, invalid event-type, headers: %+v\n", headers) + } + default: + return fmt.Errorf("header parse failed, invalid message-type: headers: %+v\n", headers) + } + return nil +} + +func (osr *ObjectSelectResponse) analysisRecords(data []byte) (int, error) { + var needReadLength int32 + dlen := int32(len(data)) + restLen := osr.Frame.TotalFrameLength - 16 - osr.Frame.TotalHeaderLength - osr.Frame.DataFrame.ConsumedBytesLength + if dlen <= restLen { + needReadLength = dlen + } else { + needReadLength = restLen + } + n, err := osr.fixedLengthRead(data[:needReadLength], kReadTimeout) + if err != nil { + return n, fmt.Errorf("read data frame error: %s", err.Error()) + } + osr.Frame.DataFrame.ConsumedBytesLength += int32(n) + osr.Frame.Payload = append(osr.Frame.Payload, data[:needReadLength]...) 
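+	// frame layout: 12-byte prelude, headers, payload, then a 4-byte CRC;
+	// hence the payload length is TotalFrameLength - 16 - TotalHeaderLength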
+	// a full frame has been read and copied into data; verify its checksum
+	if osr.Frame.DataFrame.ConsumedBytesLength == osr.Frame.TotalFrameLength-16-osr.Frame.TotalHeaderLength {
+		osr.Frame.DataFrame.ConsumedBytesLength = 0
+		err = osr.payloadChecksum("RecordFrame")
+	}
+	return n, err
+}
+
+func (osr *ObjectSelectResponse) analysisXml(frame interface{}) error {
+	payloadLength := osr.Frame.TotalFrameLength - 16 - osr.Frame.TotalHeaderLength
+	bFrame := make([]byte, payloadLength)
+	_, err := osr.fixedLengthRead(bFrame, kReadTimeout)
+	if err != nil {
+		return err
+	}
+	err = xml.Unmarshal(bFrame, frame)
+	if err != nil {
+		return err
+	}
+	osr.Frame.Payload = append(osr.Frame.Payload, bFrame...)
+	return osr.payloadChecksum("XmlFrame")
+}
+
+// payloadChecksum is called once a frame has been read completely; it verifies the frame CRC and prepares for reading the next frame
+func (osr *ObjectSelectResponse) payloadChecksum(ftype string) error {
+	bcrc := make([]byte, 4)
+	_, err := osr.fixedLengthRead(bcrc, kReadTimeout)
+	if err != nil {
+		return err
+	}
+	var res uint32
+	bytesToInt(bcrc, &res)
+	err = checksum(osr.Frame.Payload, res, ftype)
+
+	osr.Frame.NextFrame = true
+	osr.Frame.Payload = []byte{}
+
+	return err
+}
+
+type chanReadIO struct {
+	readLen int
+	err     error
+}
+
+func (osr *ObjectSelectResponse) fixedLengthRead(p []byte, readTimeout int64) (int, error) {
+	timeout := time.Duration(readTimeout)
+	r := osr.Body
+	ch := make(chan chanReadIO, 1)
+	go func(p []byte) {
+		var needLen int
+		readChan := chanReadIO{}
+		needLen = len(p)
+		for {
+			n, err := r.Read(p[readChan.readLen:needLen])
+			readChan.readLen += n
+			if err != nil {
+				readChan.err = err
+				ch <- readChan
+				close(ch)
+				return
+			}
+
+			if readChan.readLen == needLen {
+				break
+			}
+		}
+		ch <- readChan
+		close(ch)
+	}(p)
+
+	select {
+	case <-time.After(time.Second * timeout):
+		return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second), need read:%d", osr.Headers.Get("x-cos-request-id"), timeout, len(p))
+	case result := <-ch:
+		return result.readLen, result.err
+	}
+}
+
+func bytesToInt(b []byte, ret interface{}) {
+	binBuf := bytes.NewBuffer(b)
+	binary.Read(binBuf, binary.BigEndian, ret)
+}
+
+func checksum(b []byte, rec uint32, ftype string) error {
+	c := crc32.ChecksumIEEE(b)
+	if c != rec {
+		return fmt.Errorf("parse type: %v, checksum failed, cal: %v, rec: %v\n", ftype, c, rec)
+	}
+	return nil
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/progress.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/progress.go
new file mode 100644
index 0000000..3fae1fe
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/progress.go
@@ -0,0 +1,160 @@
+package cos
+
+import (
+	"fmt"
+	"hash"
+	"io"
+)
+
+type ProgressEventType int
+
+const (
+	// transfer started
+	ProgressStartedEvent ProgressEventType = iota
+	// data is being transferred
+	ProgressDataEvent
+	// transfer finished; this alone does not mean the API call succeeded
+	ProgressCompletedEvent
+	// emitted only when an error occurs during data transfer
+	ProgressFailedEvent
+)
+
+type ProgressEvent struct {
+	EventType     ProgressEventType
+	RWBytes       int64
+	ConsumedBytes int64
+	TotalBytes    int64
+	Err           error
+}
+
+func newProgressEvent(eventType ProgressEventType, rwBytes, consumed, total int64, err ...error) *ProgressEvent {
+	event := &ProgressEvent{
+		EventType:     eventType,
+		RWBytes:       rwBytes,
+		ConsumedBytes: consumed,
+		TotalBytes:    total,
+	}
+	if len(err) > 0 {
+		event.Err = err[0]
+	}
+	return event
+}
+
+// ProgressListener is the interface a user-defined listener must implement
+type ProgressListener interface {
+	ProgressChangedCallback(event *ProgressEvent)
+}
+
+func progressCallback(listener ProgressListener, event *ProgressEvent) {
+	if listener != nil && event != nil {
+		listener.ProgressChangedCallback(event)
+	}
+}
+
+type teeReader struct {
+	reader          io.Reader
+	writer          io.Writer
+	consumedBytes   int64
+	totalBytes      int64
+	listener        ProgressListener
+	disableCheckSum bool
+}
+
+func (r *teeReader) Read(p []byte) (int, error) {
+	if r.consumedBytes == 0 {
+		event := newProgressEvent(ProgressStartedEvent, 0, r.consumedBytes, r.totalBytes)
+		progressCallback(r.listener, event)
+	}
+
+	n, err := r.reader.Read(p)
+	if err != nil && err != io.EOF {
+		event := newProgressEvent(ProgressFailedEvent, 0, r.consumedBytes, r.totalBytes, err)
+		progressCallback(r.listener, event)
+	}
+	if n > 0 {
+		r.consumedBytes += int64(n)
+		if r.writer != nil {
+			if n, err := r.writer.Write(p[:n]); err != nil {
+				return n, err
+			}
+		}
+		if r.listener != nil {
+			event := newProgressEvent(ProgressDataEvent, int64(n), r.consumedBytes, r.totalBytes)
+			progressCallback(r.listener, event)
+		}
+	}
+
+	if err == io.EOF {
+		event := newProgressEvent(ProgressCompletedEvent, int64(n), r.consumedBytes, r.totalBytes)
+		progressCallback(r.listener, event)
+	}
+
+	return n, err
+}
+
+func (r *teeReader) Close() error {
+	if rc, ok := r.reader.(io.ReadCloser); ok {
+		return rc.Close()
+	}
+	return nil
+}
+
+func (r *teeReader) Size() int64 {
+	return r.totalBytes
+}
+
+func (r *teeReader) Crc64() uint64 {
+	if r.writer != nil {
+		if th, ok := r.writer.(hash.Hash64); ok {
+			return th.Sum64()
+		}
+	}
+	return 0
+}
+
+func (r *teeReader) Sum() []byte {
+	if r.writer != nil {
+		if th, ok := r.writer.(hash.Hash); ok {
+			return th.Sum(nil)
+		}
+	}
+	return []byte{}
+}
+
+func TeeReader(reader io.Reader, writer io.Writer, total int64, listener ProgressListener) *teeReader {
+	return &teeReader{
+		reader:          reader,
+		writer:          writer,
+		consumedBytes:   0,
+		totalBytes:      total,
+		listener:        listener,
+		disableCheckSum: false,
+	}
+}
+
+type FixedLengthReader interface {
+	io.Reader
+	Size() int64
+}
+
+type DefaultProgressListener struct {
+}
+
+func (l *DefaultProgressListener) ProgressChangedCallback(event *ProgressEvent) {
+	switch event.EventType {
+	case ProgressStartedEvent:
+		fmt.Printf("Transfer Start [ConsumedBytes/TotalBytes: %d/%d]\n",
+			event.ConsumedBytes, event.TotalBytes)
+	case ProgressDataEvent:
+		fmt.Printf("\rTransfer Data [ConsumedBytes/TotalBytes: %d/%d, %d%%]",
+			event.ConsumedBytes, event.TotalBytes, event.ConsumedBytes*100/event.TotalBytes)
+	case ProgressCompletedEvent:
+		fmt.Printf("\nTransfer Complete [ConsumedBytes/TotalBytes: %d/%d]\n",
+			event.ConsumedBytes, event.TotalBytes)
+	case ProgressFailedEvent:
+		fmt.Printf("\nTransfer Failed [ConsumedBytes/TotalBytes: %d/%d] [Err: %v]\n",
+			event.ConsumedBytes, event.TotalBytes, event.Err)
+	default:
+		fmt.Printf("Progress Changed Error: unknown progress event type\n")
+	}
+}
diff --git a/vendor/github.com/tencentyun/cos-go-sdk-v5/service.go b/vendor/github.com/tencentyun/cos-go-sdk-v5/service.go
new file mode 100644
index 0000000..defe938
--- /dev/null
+++ b/vendor/github.com/tencentyun/cos-go-sdk-v5/service.go
@@ -0,0 +1,35 @@
+package cos
+
+import (
+	"context"
+	"encoding/xml"
+	"net/http"
+)
+
+// Service-related APIs
+type ServiceService service
+
+// ServiceGetResult is the result of Get Service
+type ServiceGetResult struct {
+	XMLName xml.Name `xml:"ListAllMyBucketsResult"`
+	Owner   *Owner   `xml:"Owner"`
+	Buckets []Bucket `xml:"Buckets>Bucket,omitempty"`
+}
+
+// Get Service lists all Buckets owned by the requesting user.
+//
+// This API requires Authorization signature authentication and only
+// returns the Bucket list of the account that owns the AccessID used
+// in the signature.
+//
+// https://www.qcloud.com/document/product/436/8291
+func (s *ServiceService) Get(ctx context.Context) 
(*ServiceGetResult, *Response, error) { + var res ServiceGetResult + sendOpt := sendOptions{ + baseURL: s.client.BaseURL.ServiceURL, + uri: "/", + method: http.MethodGet, + result: &res, + } + resp, err := s.client.send(ctx, &sendOpt) + return &res, resp, err +} diff --git a/vendor/github.com/upyun/go-sdk/v3/LICENSE b/vendor/github.com/upyun/go-sdk/v3/LICENSE new file mode 100644 index 0000000..e5c3316 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 UPYUN + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/auth.go b/vendor/github.com/upyun/go-sdk/v3/upyun/auth.go new file mode 100644 index 0000000..52d3fa4 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/auth.go @@ -0,0 +1,84 @@ +package upyun + +import ( + "fmt" + "sort" + "strings" +) + +type RESTAuthConfig struct { + Method string + Uri string + DateStr string + LengthStr string +} + +type PurgeAuthConfig struct { + PurgeList string + DateStr string +} + +type UnifiedAuthConfig struct { + Method string + Uri string + DateStr string + Policy string + ContentMD5 string +} + +func (u *UpYun) MakeRESTAuth(config *RESTAuthConfig) string { + sign := []string{ + config.Method, + config.Uri, + config.DateStr, + config.LengthStr, + u.Password, + } + return "UpYun " + u.Operator + ":" + md5Str(strings.Join(sign, "&")) +} + +func (u *UpYun) MakePurgeAuth(config *PurgeAuthConfig) string { + sign := []string{ + config.PurgeList, + u.Bucket, + config.DateStr, + u.Password, + } + return "UpYun " + u.Bucket + ":" + u.Operator + ":" + md5Str(strings.Join(sign, "&")) +} + +func (u *UpYun) MakeFormAuth(policy string) string { + return md5Str(base64ToStr([]byte(policy)) + "&" + u.Secret) +} + +func (u *UpYun) MakeProcessAuth(kwargs map[string]string) string { + keys := []string{} + for k := range kwargs { + keys = append(keys, k) + } + sort.Strings(keys) + + auth := "" + for _, k := range keys { + auth += k + kwargs[k] + } + return fmt.Sprintf("UpYun %s:%s", u.Operator, md5Str(u.Operator+auth+u.Password)) +} + +func (u *UpYun) MakeUnifiedAuth(config *UnifiedAuthConfig) string { + sign := []string{ + config.Method, + config.Uri, + config.DateStr, + config.Policy, + config.ContentMD5, + } + signNoEmpty := []string{} + for _, v := range sign { + if v != "" { + signNoEmpty = append(signNoEmpty, v) + } + } + signStr := base64ToStr(hmacSha1(u.Password, []byte(strings.Join(signNoEmpty, "&")))) + return "UpYun " + u.Operator + ":" + signStr +} diff --git 
a/vendor/github.com/upyun/go-sdk/v3/upyun/errors.go b/vendor/github.com/upyun/go-sdk/v3/upyun/errors.go new file mode 100644 index 0000000..d8b748f --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/errors.go @@ -0,0 +1,78 @@ +package upyun + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" +) + +type Error struct { + Code int `json:"code"` + Message string `json:"msg"` + RequestID string `json:"id"` + Operation string + StatusCode int + Header http.Header + Body []byte +} + +func (e *Error) Error() string { + if e.Operation == "" { + e.Operation = "upyun api" + } + + return fmt.Sprintf("%s error: status=%d, code=%d, message=%s, request-id=%s", + e.Operation, e.StatusCode, e.Code, e.Message, e.RequestID) +} + +func checkResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + uerr := new(Error) + uerr.StatusCode = res.StatusCode + uerr.Header = res.Header + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return uerr + } + uerr.Body = slurp + json.Unmarshal(slurp, uerr) + return uerr +} + +func checkStatusCode(err error, status int) bool { + if err == nil { + return false + } + + ae, ok := err.(*Error) + return ok && ae.StatusCode == status +} + +func IsNotExist(err error) bool { + return checkStatusCode(err, http.StatusNotFound) +} + +func IsNotModified(err error) bool { + return checkStatusCode(err, http.StatusNotModified) +} + +func IsTooManyRequests(err error) bool { + return checkStatusCode(err, http.StatusTooManyRequests) +} + +func errorOperation(op string, err error) error { + if err == nil { + return errors.New(op) + } + ae, ok := err.(*Error) + if ok { + ae.Operation = op + return ae + } + return fmt.Errorf("%s: %w", op, err) +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/fileinfo.go b/vendor/github.com/upyun/go-sdk/v3/upyun/fileinfo.go new file mode 100644 index 0000000..b908934 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/fileinfo.go @@ -0,0 +1,72 @@ +package upyun + +import ( + "net/http" + "strings" + "time" +) + +type FileInfo struct { + Name string + Size int64 + ContentType string + IsDir bool + IsEmptyDir bool + MD5 string + Time time.Time + + Meta map[string]string + + /* image information */ + ImgType string + ImgWidth int64 + ImgHeight int64 + ImgFrames int64 +} + +/* + Content-Type: image/gif + ETag: "dc9ea7257aa6da18e74505259b04a946" + x-upyun-file-type: GIF + x-upyun-height: 379 + x-upyun-width: 500 + x-upyun-frames: 90 +*/ +func parseHeaderToFileInfo(header http.Header, getinfo bool) *FileInfo { + fInfo := &FileInfo{} + for k, v := range header { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-upyun-meta-") { + if fInfo.Meta == nil { + fInfo.Meta = make(map[string]string) + } + fInfo.Meta[lk] = v[0] + } + } + + if getinfo { + // HTTP HEAD + fInfo.Size = parseStrToInt(header.Get("x-upyun-file-size")) + fInfo.IsDir = header.Get("x-upyun-file-type") == "folder" + fInfo.Time = time.Unix(parseStrToInt(header.Get("x-upyun-file-date")), 0) + fInfo.ContentType = header.Get("Content-Type") + fInfo.MD5 = header.Get("Content-MD5") + } else { + fInfo.Size = parseStrToInt(header.Get("Content-Length")) + fInfo.ContentType = header.Get("Content-Type") + fInfo.MD5 = strings.ReplaceAll(header.Get("Content-Md5"), "\"", "") + if fInfo.MD5 == "" { + fInfo.MD5 = strings.ReplaceAll(header.Get("Etag"), "\"", "") + } + lastM := header.Get("Last-Modified") + t, err := http.ParseTime(lastM) + if err == nil { + fInfo.Time = t + } + 
fInfo.ImgType = header.Get("x-upyun-file-type") + fInfo.ImgWidth = parseStrToInt(header.Get("x-upyun-width")) + fInfo.ImgHeight = parseStrToInt(header.Get("x-upyun-height")) + fInfo.ImgFrames = parseStrToInt(header.Get("x-upyun-frames")) + } + return fInfo +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/form.go b/vendor/github.com/upyun/go-sdk/v3/upyun/form.go new file mode 100644 index 0000000..7d50555 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/form.go @@ -0,0 +1,148 @@ +package upyun + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "time" +) + +type FormUploadConfig struct { + LocalPath string + SaveKey string + ExpireAfterSec int64 + NotifyUrl string + Apps []map[string]interface{} + Options map[string]interface{} +} + +type FormUploadResp struct { + Code int `json:"code"` + Msg string `json:"message"` + Url string `json:"url"` + Timestamp int64 `json:"time"` + ImgWidth int `json:"image-width"` + ImgHeight int `json:"image-height"` + ImgFrames int `json:"image-frames"` + ImgType string `json:"image-type"` + Sign string `json:"sign"` + Taskids []string `json:"task_ids"` +} + +func (config *FormUploadConfig) Format() { + if config.Options == nil { + config.Options = make(map[string]interface{}) + } + if config.SaveKey != "" { + config.Options["save-key"] = config.SaveKey + } + if config.NotifyUrl != "" { + config.Options["notify-url"] = config.NotifyUrl + } + if config.ExpireAfterSec > 0 { + config.Options["expiration"] = time.Now().Unix() + config.ExpireAfterSec + } + if len(config.Apps) > 0 { + config.Options["apps"] = config.Apps + } +} + +func (up *UpYun) FormUpload(config *FormUploadConfig) (*FormUploadResp, error) { + config.Format() + config.Options["bucket"] = up.Bucket + + args, err := json.Marshal(config.Options) + if err != nil { + return nil, err + } + policy := base64ToStr(args) + + formValues := make(map[string]string) + formValues["policy"] = policy + formValues["file"] = config.LocalPath + + if up.deprecated { + formValues["signature"] = up.MakeFormAuth(policy) + } else { + sign := &UnifiedAuthConfig{ + Method: "POST", + Uri: "/" + up.Bucket, + Policy: policy, + } + if v, ok := config.Options["date"]; ok { + sign.DateStr = v.(string) + } + if v, ok := config.Options["content-md5"]; ok { + sign.ContentMD5 = v.(string) + } + formValues["authorization"] = up.MakeUnifiedAuth(sign) + } + + endpoint := up.doGetEndpoint("v0.api.upyun.com") + url := fmt.Sprintf("http://%s/%s", endpoint, up.Bucket) + resp, err := up.doFormRequest(url, formValues) + if err != nil { + return nil, err + } + + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + + if err != nil { + return nil, errorOperation("form read body", err) + } + + var r FormUploadResp + err = json.Unmarshal(b, &r) + return &r, err +} + +func (up *UpYun) doFormRequest(url string, formValues map[string]string) (*http.Response, error) { + formBody := &bytes.Buffer{} + formWriter := multipart.NewWriter(formBody) + defer formWriter.Close() + + for k, v := range formValues { + if k != "file" { + formWriter.WriteField(k, v) + } + } + + boundary := formWriter.Boundary() + bdBuf := bytes.NewBufferString(fmt.Sprintf("\r\n--%s--\r\n", boundary)) + + fpath := formValues["file"] + fd, err := os.Open(fpath) + if err != nil { + return nil, err + } + defer fd.Close() + + fInfo, err := fd.Stat() + if err != nil { + return nil, err + } + + _, err = formWriter.CreateFormFile("file", filepath.Base(fpath)) + if err != nil { + return nil, err 
+ } + + headers := map[string]string{ + "Content-Type": "multipart/form-data; boundary=" + boundary, + "Content-Length": fmt.Sprint(formBody.Len() + int(fInfo.Size()) + bdBuf.Len()), + } + + body := io.MultiReader(formBody, fd, bdBuf) + resp, err := up.doHTTPRequest("POST", url, headers, body) + if err != nil { + return nil, errorOperation("form", err) + } + return resp, nil +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/http.go b/vendor/github.com/upyun/go-sdk/v3/upyun/http.go new file mode 100644 index 0000000..8347ea3 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/http.go @@ -0,0 +1,82 @@ +package upyun + +import ( + "bytes" + "io" + "net/http" + "os" + "strconv" + "strings" +) + +func (up *UpYun) doHTTPRequest(method, url string, headers map[string]string, + body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + for k, v := range headers { + if strings.ToLower(k) == "host" { + req.Host = v + } else { + req.Header.Set(k, v) + } + } + + req.Header.Set("User-Agent", up.UserAgent) + if method == "PUT" || method == "POST" { + found := false + length := req.Header.Get("Content-Length") + if length != "" { + req.ContentLength, _ = strconv.ParseInt(length, 10, 64) + found = true + } else { + switch v := body.(type) { + case *os.File: + if fInfo, err := v.Stat(); err == nil { + req.ContentLength = fInfo.Size() + found = true + } + case UpYunPutReader: + req.ContentLength = int64(v.Len()) + found = true + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + found = true + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + found = true + case *strings.Reader: + req.ContentLength = int64(v.Len()) + found = true + case *io.LimitedReader: + req.ContentLength = v.N + found = true + } + } + if found && req.ContentLength == 0 { + req.Body = nil + } + } + + // fmt.Printf("%+v\n", req) + + resp, err = up.httpc.Do(req) + if err != nil { + return nil, err + } + err = checkResponse(resp) + if err != nil { + return nil, err + } + return resp, nil +} + +func (up *UpYun) doGetEndpoint(host string) string { + s := up.Hosts[host] + if s != "" { + return s + } + return host +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/io.go b/vendor/github.com/upyun/go-sdk/v3/upyun/io.go new file mode 100644 index 0000000..b0b865f --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/io.go @@ -0,0 +1,78 @@ +package upyun + +import ( + "fmt" + "io" + "os" +) + +type UpYunPutReader interface { + Len() (n int) + MD5() (ret string) + Read([]byte) (n int, err error) + Copyed() (n int) +} + +type fragmentFile struct { + realFile *os.File + offset int64 + limit int64 + cursor int64 +} + +func (f *fragmentFile) Seek(offset int64, whence int) (ret int64, err error) { + switch whence { + case 0: + f.cursor = offset + ret, err = f.realFile.Seek(f.offset+f.cursor, 0) + return ret - f.offset, err + default: + return 0, fmt.Errorf("whence must be 0") + } +} + +func (f *fragmentFile) Read(b []byte) (n int, err error) { + if f.cursor >= f.limit { + return 0, io.EOF + } + n, err = f.realFile.Read(b) + if f.cursor+int64(n) > f.limit { + n = int(f.limit - f.cursor) + } + f.cursor += int64(n) + return n, err +} + +func (f *fragmentFile) Stat() (fInfo os.FileInfo, err error) { + return fInfo, fmt.Errorf("fragmentFile not implement Stat()") +} + +func (f *fragmentFile) Close() error { + return nil +} + +func (f *fragmentFile) Copyed() int { + return int(f.cursor - f.offset) +} + +func (f *fragmentFile) Len() int 
{ + return int(f.limit - f.offset) +} + +func (f *fragmentFile) MD5() string { + s, _ := md5File(f) + return s +} + +func newFragmentFile(file *os.File, offset, limit int64) (*fragmentFile, error) { + f := &fragmentFile{ + realFile: file, + offset: offset, + limit: limit, + } + + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + return f, nil +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/process.go b/vendor/github.com/upyun/go-sdk/v3/upyun/process.go new file mode 100644 index 0000000..8987bdb --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/process.go @@ -0,0 +1,230 @@ +package upyun + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path" + "strings" + "time" +) + +type CommitTasksConfig struct { + AppName string + Accept string + Source string + NotifyUrl string + Tasks []interface{} +} + +type LiveauditCreateTask struct { + Source string + SaveAs string + NotifyUrl string + Interval string + Resize string +} + +type LiveauditCancelTask struct { + TaskId string +} + +type SyncCommonTask struct { + Kwargs map[string]interface{} + TaskUri string +} + +func (up *UpYun) CommitTasks(config *CommitTasksConfig) (taskIds []string, err error) { + b, err := json.Marshal(config.Tasks) + if err != nil { + return nil, err + } + + kwargs := map[string]string{ + "app_name": config.AppName, + "tasks": base64ToStr(b), + "notify_url": config.NotifyUrl, + + // for naga + "source": config.Source, + } + if config.Accept != "" { + kwargs["accept"] = config.Accept + } + + err = up.doProcessRequest("POST", "/pretreatment/", kwargs, &taskIds) + return +} + +func (up *UpYun) GetProgress(taskIds []string) (result map[string]int, err error) { + kwargs := map[string]string{ + "task_ids": strings.Join(taskIds, ","), + } + v := map[string]map[string]int{} + err = up.doProcessRequest("GET", "/status/", kwargs, &v) + if err != nil { + return + } + + if r, ok := v["tasks"]; ok { + return r, err + } + return nil, fmt.Errorf("no tasks") +} + +func (up *UpYun) GetResult(taskIds []string) (result map[string]interface{}, err error) { + kwargs := map[string]string{ + "task_ids": strings.Join(taskIds, ","), + } + v := map[string]map[string]interface{}{} + err = up.doProcessRequest("GET", "/result/", kwargs, &v) + if err != nil { + return + } + + if r, ok := v["tasks"]; ok { + return r, err + } + return nil, fmt.Errorf("no tasks") +} + +func (up *UpYun) doProcessRequest(method, uri string, + kwargs map[string]string, v interface{}) error { + if _, ok := kwargs["service"]; !ok { + kwargs["service"] = up.Bucket + } + + if method == "GET" { + uri = addQueryToUri(uri, kwargs) + } + + headers := make(map[string]string) + headers["Date"] = makeRFC1123Date(time.Now()) + headers["Content-Type"] = "application/x-www-form-urlencoded" + if up.deprecated { + headers["Authorization"] = up.MakeProcessAuth(kwargs) + } else { + headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{ + Method: method, + Uri: uri, + DateStr: headers["Date"], + }) + } + + var resp *http.Response + var err error + endpoint := up.doGetEndpoint("p0.api.upyun.com") + rawurl := fmt.Sprintf("http://%s%s", endpoint, uri) + switch method { + case "GET": + resp, err = up.doHTTPRequest(method, rawurl, headers, nil) + case "POST": + payload := encodeQueryToPayload(kwargs) + resp, err = up.doHTTPRequest(method, rawurl, headers, bytes.NewBufferString(payload)) + default: + return fmt.Errorf("Unknown method") + } + + if err != nil { + return errorOperation("process", err) + } + + b, err := 
ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return errorOperation("process read body", err) + } + + return json.Unmarshal(b, v) +} + +func (up *UpYun) CommitSyncTasks(commitTask interface{}) (result map[string]interface{}, err error) { + var kwargs map[string]interface{} + var uri string + var payload string + + switch taskConfig := commitTask.(type) { + case LiveauditCreateTask: + kwargs = map[string]interface{}{ + "source": taskConfig.Source, + "save_as": taskConfig.SaveAs, + "notify_url": taskConfig.NotifyUrl, + "service": up.Bucket, + } + if taskConfig.Interval != "" { + kwargs["interval"] = taskConfig.Interval + } + if taskConfig.Resize != "" { + kwargs["resize"] = taskConfig.Resize + } + uri = path.Join("/", up.Bucket, "/liveaudit/create") + + case LiveauditCancelTask: + kwargs = map[string]interface{}{ + "task_id": taskConfig.TaskId, + "service": up.Bucket, + } + uri = path.Join("/", up.Bucket, "/liveaudit/cancel") + + case SyncCommonTask: + kwargs = taskConfig.Kwargs + uri = path.Join("/", up.Bucket, taskConfig.TaskUri) + + default: + err = fmt.Errorf("don't match any task") + return nil, err + } + + if _, exist := kwargs["service"]; !exist { + kwargs["service"] = up.Bucket + } + + body, err := json.Marshal(kwargs) + if err != nil { + return nil, fmt.Errorf("can't encode the json") + } + payload = string(body) + return up.doSyncProcessRequest("POST", uri, payload) +} + +func (up *UpYun) doSyncProcessRequest(method, uri string, payload string) (map[string]interface{}, error) { + headers := make(map[string]string) + headers["Date"] = makeRFC1123Date(time.Now()) + headers["Content-Type"] = "application/json" + headers["Content-MD5"] = md5Str(payload) + headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{ + Method: method, + Uri: uri, + DateStr: headers["Date"], + ContentMD5: headers["Content-MD5"], + }) + + var resp *http.Response + var err error + endpoint := up.doGetEndpoint("p1.api.upyun.com") + rawurl := fmt.Sprintf("http://%s%s", endpoint, uri) + switch method { + case "POST": + resp, err = up.doHTTPRequest(method, rawurl, headers, strings.NewReader(payload)) + default: + return nil, fmt.Errorf("Unknown method") + } + if err != nil { + return nil, errorOperation("sync process", err) + } + + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + var v map[string]interface{} + err = json.Unmarshal(b, &v) + if err != nil { + fmt.Println("can't unmarshal the data", string(b)) + } + return v, err +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/purge.go b/vendor/github.com/upyun/go-sdk/v3/upyun/purge.go new file mode 100644 index 0000000..37036f9 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/purge.go @@ -0,0 +1,53 @@ +package upyun + +import ( + "encoding/json" + "io/ioutil" + URL "net/url" + "strings" + "time" +) + +// TODO +func (up *UpYun) Purge(urls []string) (fails []string, err error) { + purge := "http://purge.upyun.com/purge/" + date := makeRFC1123Date(time.Now()) + purgeList := unescapeUri(strings.Join(urls, "\n")) + + headers := map[string]string{ + "Date": date, + "Authorization": up.MakePurgeAuth(&PurgeAuthConfig{ + PurgeList: purgeList, + DateStr: date, + }), + "Content-Type": "application/x-www-form-urlencoded;charset=utf-8", + } + + form := make(URL.Values) + form.Add("purge", purgeList) + + body := strings.NewReader(form.Encode()) + resp, err := up.doHTTPRequest("POST", purge, headers, body) + if err != nil { + return fails, errorOperation("purge", err) + } + defer 
resp.Body.Close() + + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fails, errorOperation("purge read body", err) + } + + result := map[string]interface{}{} + if err := json.Unmarshal(content, &result); err != nil { + return fails, err + } + if it, ok := result["invalid_domain_of_url"]; ok { + if urls, ok := it.([]interface{}); ok { + for _, url := range urls { + fails = append(fails, url.(string)) + } + } + } + return fails, nil +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/rest.go b/vendor/github.com/upyun/go-sdk/v3/upyun/rest.go new file mode 100644 index 0000000..916ba38 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/rest.go @@ -0,0 +1,741 @@ +package upyun + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "path" + "strconv" + "strings" + "time" +) + +const ( + DefaultPartSize = 1024 * 1024 + MaxPartNum = 10000 + minResumePutFileSize = 10 * 1024 * 1024 +) + +type restReqConfig struct { + method string + uri string + query string + headers map[string]string + closeBody bool + httpBody io.Reader + useMD5 bool +} + +// GetObjectConfig provides a configuration to Get method. +type GetObjectConfig struct { + Path string + // Headers contains custom http header, like User-Agent. + Headers map[string]string + LocalPath string + Writer io.Writer +} + +// GetObjectConfig provides a configuration to List method. +type GetObjectsConfig struct { + Path string + Headers map[string]string + ObjectsChan chan *FileInfo + QuitChan chan bool + MaxListObjects int + MaxListTries int + // MaxListLevel: depth of recursion + MaxListLevel int + // DescOrder: whether list objects by desc-order + DescOrder bool + + rootDir string + level int + objNum int + try int +} + +// PutObjectConfig provides a configuration to Put method. 
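+//
+// Editor's note: a minimal usage sketch, assuming an *UpYun client named up
+// and the file paths shown (all illustrative, not part of the upstream SDK):
+//
+//	err := up.Put(&PutObjectConfig{
+//		Path:      "/remote/dir/file.txt",
+//		LocalPath: "/tmp/file.txt",
+//		UseMD5:    true,
+//	})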
+type PutObjectConfig struct { + Path string + LocalPath string + Reader io.Reader + Headers map[string]string + UseMD5 bool + UseResumeUpload bool + // Append Api Deprecated + // AppendContent bool + ResumePartSize int64 + MaxResumePutTries int +} + +type MoveObjectConfig struct { + SrcPath string + DestPath string + Headers map[string]string +} + +type CopyObjectConfig struct { + SrcPath string + DestPath string + Headers map[string]string +} + +// UploadFileConfig is multipart file upload config +type UploadPartConfig struct { + Reader io.Reader + PartSize int64 + PartID int +} +type CompleteMultipartUploadConfig struct { + Md5 string +} +type InitMultipartUploadConfig struct { + Path string + PartSize int64 + ContentLength int64 // optional + ContentType string + OrderUpload bool +} +type InitMultipartUploadResult struct { + UploadID string + Path string + PartSize int64 +} + +type DeleteObjectConfig struct { + Path string + Async bool + Folder bool // optional +} + +type ModifyMetadataConfig struct { + Path string + Operation string + Headers map[string]string +} + +type ListMultipartConfig struct { + Prefix string + Limit int64 +} +type ListMultipartPartsConfig struct { + BeginID int +} +type MultipartUploadFile struct { + Key string `json:"key"` + UUID string `json:"uuid"` + Completed bool `json:"completed"` + CreatedAt int64 `json:"created_at"` +} +type ListMultipartUploadResult struct { + Files []*MultipartUploadFile `json:"files"` +} +type MultipartUploadedPart struct { + Etag string `json:"etag"` + Size int64 `json:"size"` + Id int `json:"id"` +} +type ListUploadedPartsResult struct { + Parts []*MultipartUploadedPart `json:"parts"` +} + +func (up *UpYun) Usage() (n int64, err error) { + var resp *http.Response + resp, err = up.doRESTRequest(&restReqConfig{ + method: "GET", + uri: "/", + query: "usage", + }) + + if err == nil { + n, err = readHTTPBodyToInt(resp) + } + + if err != nil { + return 0, errorOperation("usage", err) + } + return n, nil +} + +func (up *UpYun) Mkdir(path string) error { + _, err := up.doRESTRequest(&restReqConfig{ + method: "POST", + uri: path, + headers: map[string]string{ + "folder": "true", + "x-upyun-folder": "true", + }, + closeBody: true, + }) + if err != nil { + return errorOperation(fmt.Sprintf("mkdir %s", path), err) + } + return nil +} + +// TODO: maybe directory +func (up *UpYun) Get(config *GetObjectConfig) (fInfo *FileInfo, err error) { + if config.LocalPath != "" { + var fd *os.File + if fd, err = os.Create(config.LocalPath); err != nil { + return nil, errorOperation("create file", err) + } + defer fd.Close() + config.Writer = fd + } + + if config.Writer == nil { + return nil, errors.New("no writer") + } + + resp, err := up.doRESTRequest(&restReqConfig{ + method: "GET", + uri: config.Path, + }) + if err != nil { + return nil, errorOperation(fmt.Sprintf("get %s", config.Path), err) + } + defer resp.Body.Close() + + fInfo = parseHeaderToFileInfo(resp.Header, false) + fInfo.Name = config.Path + + if fInfo.Size, err = io.Copy(config.Writer, resp.Body); err != nil { + return nil, errorOperation("io copy", err) + } + return +} + +func (up *UpYun) put(config *PutObjectConfig) error { + /* Append Api Deprecated + if config.AppendContent { + if config.Headers == nil { + config.Headers = make(map[string]string) + } + config.Headers["X-Upyun-Append"] = "true" + } + */ + _, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: config.Path, + headers: config.Headers, + closeBody: true, + httpBody: config.Reader, + useMD5: config.UseMD5, + }) + 
if err != nil { + return errorOperation(fmt.Sprintf("put %s", config.Path), err) + } + return nil +} + +func getPartInfo(partSize, fsize int64) (int64, int64, error) { + if partSize <= 0 { + partSize = DefaultPartSize + } + if partSize < DefaultPartSize { + return 0, 0, fmt.Errorf("The minimum of part size is %d", DefaultPartSize) + } + if partSize%DefaultPartSize != 0 { + return 0, 0, fmt.Errorf("The part size is a multiple of %d", DefaultPartSize) + } + + partNum := (fsize + partSize - 1) / partSize + if partNum > MaxPartNum { + return 0, 0, fmt.Errorf("The maximum part number is %d", MaxPartNum) + } + return partSize, partNum, nil +} +func (up *UpYun) resumePut(config *PutObjectConfig) error { + f, ok := config.Reader.(*os.File) + if !ok { + return errors.New("resumePut: type != *os.File") + } + + fileinfo, err := f.Stat() + if err != nil { + return errorOperation("stat", err) + } + + fsize := fileinfo.Size() + if fsize < minResumePutFileSize { + return up.put(config) + } + + if config.ResumePartSize == 0 { + config.ResumePartSize = DefaultPartSize + } + maxPartID := int((fsize+config.ResumePartSize-1)/config.ResumePartSize - 1) + + if config.Headers == nil { + config.Headers = make(map[string]string) + } + + curSize, partSize := int64(0), config.ResumePartSize + headers := config.Headers + uploadInfo, err := up.InitMultipartUpload(&InitMultipartUploadConfig{ + Path: config.Path, + PartSize: partSize, + ContentType: headers["Content-Type"], + ContentLength: fsize, + OrderUpload: true, + }) + if err != nil { + return err + } + + for id := 0; id <= maxPartID; id++ { + if curSize+partSize > fsize { + partSize = fsize - curSize + } + fragFile, err := newFragmentFile(f, curSize, partSize) + if err != nil { + return errorOperation("new fragment file", err) + } + + try := 0 + for ; config.MaxResumePutTries == 0 || try < config.MaxResumePutTries; try++ { + err = up.UploadPart(uploadInfo, &UploadPartConfig{ + PartID: id, + PartSize: partSize, + Reader: fragFile, + }) + if err == nil { + break + } + fragFile.Seek(0, 0) + } + + if config.MaxResumePutTries > 0 && try == config.MaxResumePutTries { + return err + } + curSize += partSize + } + + completeConfig := &CompleteMultipartUploadConfig{} + if config.UseMD5 { + f.Seek(0, 0) + completeConfig.Md5, _ = md5File(f) + } + return up.CompleteMultipartUpload(uploadInfo, completeConfig) +} + +func (up *UpYun) Put(config *PutObjectConfig) (err error) { + if config.LocalPath != "" { + var fd *os.File + if fd, err = os.Open(config.LocalPath); err != nil { + return errorOperation("open file", err) + } + defer fd.Close() + config.Reader = fd + } + + if config.UseResumeUpload { + return up.resumePut(config) + } + return up.put(config) +} + +func (up *UpYun) Move(config *MoveObjectConfig) error { + headers := map[string]string{ + "X-Upyun-Move-Source": path.Join("/", up.Bucket, escapeUri(config.SrcPath)), + } + for k, v := range config.Headers { + headers[k] = v + } + _, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: config.DestPath, + headers: headers, + }) + if err != nil { + return errorOperation("move source", err) + } + return nil +} + +func (up *UpYun) Copy(config *CopyObjectConfig) error { + headers := map[string]string{ + "X-Upyun-Copy-Source": path.Join("/", up.Bucket, escapeUri(config.SrcPath)), + } + for k, v := range config.Headers { + headers[k] = v + } + _, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: config.DestPath, + headers: headers, + }) + if err != nil { + return errorOperation("copy source", err) + } + 
return nil +} + +func (up *UpYun) InitMultipartUpload(config *InitMultipartUploadConfig) (*InitMultipartUploadResult, error) { + partSize, _, err := getPartInfo(config.PartSize, config.ContentLength) + if err != nil { + return nil, errorOperation("init multipart", err) + } + headers := make(map[string]string) + headers["X-Upyun-Multi-Type"] = config.ContentType + if config.ContentLength > 0 { + headers["X-Upyun-Multi-Length"] = strconv.FormatInt(config.ContentLength, 10) + } + headers["X-Upyun-Multi-Stage"] = "initiate" + if !config.OrderUpload { + headers["X-Upyun-Multi-Disorder"] = "true" + } + headers["X-Upyun-Multi-Part-Size"] = strconv.FormatInt(partSize, 10) + resp, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: config.Path, + headers: headers, + closeBody: true, + }) + if err != nil { + return nil, errorOperation("init multipart", err) + } + return &InitMultipartUploadResult{ + UploadID: resp.Header.Get("X-Upyun-Multi-Uuid"), + Path: config.Path, + PartSize: partSize, + }, nil +} +func (up *UpYun) UploadPart(initResult *InitMultipartUploadResult, part *UploadPartConfig) error { + headers := make(map[string]string) + headers["X-Upyun-Multi-Stage"] = "upload" + headers["X-Upyun-Multi-Uuid"] = initResult.UploadID + headers["X-Upyun-Part-Id"] = strconv.FormatInt(int64(part.PartID), 10) + headers["Content-Length"] = strconv.FormatInt(part.PartSize, 10) + + _, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: initResult.Path, + headers: headers, + closeBody: true, + useMD5: false, + httpBody: part.Reader, + }) + if err != nil { + return errorOperation("upload multipart", err) + } + return nil +} +func (up *UpYun) CompleteMultipartUpload(initResult *InitMultipartUploadResult, config *CompleteMultipartUploadConfig) error { + headers := make(map[string]string) + headers["X-Upyun-Multi-Stage"] = "complete" + headers["X-Upyun-Multi-Uuid"] = initResult.UploadID + if config != nil { + if config.Md5 != "" { + headers["X-Upyun-Multi-Md5"] = config.Md5 + } + } + _, err := up.doRESTRequest(&restReqConfig{ + method: "PUT", + uri: initResult.Path, + headers: headers, + }) + if err != nil { + return errorOperation("complete multipart", err) + } + return nil +} +func (up *UpYun) ListMultipartUploads(config *ListMultipartConfig) (*ListMultipartUploadResult, error) { + headers := make(map[string]string) + headers["X-Upyun-List-Type"] = "multi" + if config.Prefix != "" { + headers["X-Upyun-List-Prefix"] = base64.StdEncoding.EncodeToString([]byte(config.Prefix)) + } + if config.Limit > 0 { + headers["X-Upyun-List-Limit"] = strconv.FormatInt(config.Limit, 10) + } + + res, err := up.doRESTRequest(&restReqConfig{ + method: "GET", + headers: headers, + uri: "/", + closeBody: false, + useMD5: false, + }) + if err != nil { + return nil, errorOperation("list multipart", err) + } + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, errorOperation("list multipart read body", err) + } + + result := &ListMultipartUploadResult{} + err = json.Unmarshal(body, result) + if err != nil { + return nil, errorOperation("list multipart read body", err) + } + return result, nil +} + +func (up *UpYun) ListMultipartParts(intiResult *InitMultipartUploadResult, config *ListMultipartPartsConfig) (*ListUploadedPartsResult, error) { + headers := make(map[string]string) + headers["X-Upyun-Multi-Uuid"] = intiResult.UploadID + + if config.BeginID > 0 { + headers["X-Upyun-Part-Id"] = fmt.Sprint(config.BeginID) + } + res, err := up.doRESTRequest(&restReqConfig{ + method: "GET", + headers: 
headers, + uri: intiResult.Path, + closeBody: false, + useMD5: false, + }) + if err != nil { + return nil, errorOperation("list multipart parts", err) + } + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, errorOperation("list multipart parts read body", err) + } + + result := &ListUploadedPartsResult{} + err = json.Unmarshal(body, result) + if err != nil { + return nil, errorOperation("list multipart parts read body", err) + } + return result, nil +} +func (up *UpYun) Delete(config *DeleteObjectConfig) error { + headers := map[string]string{} + if config.Async { + headers["x-upyun-async"] = "true" + } + if config.Folder { + headers["x-upyun-folder"] = "true" + } + _, err := up.doRESTRequest(&restReqConfig{ + method: "DELETE", + uri: config.Path, + headers: headers, + closeBody: true, + }) + if err != nil { + return errorOperation("delete", err) + } + return nil +} + +func (up *UpYun) GetInfo(path string) (*FileInfo, error) { + resp, err := up.doRESTRequest(&restReqConfig{ + method: "HEAD", + uri: path, + closeBody: true, + }) + if err != nil { + return nil, errorOperation("get info", err) + } + fInfo := parseHeaderToFileInfo(resp.Header, true) + fInfo.Name = path + return fInfo, nil +} + +func (up *UpYun) List(config *GetObjectsConfig) error { + if config.ObjectsChan == nil { + return errors.New("ObjectsChan is nil") + } + if config.Headers == nil { + config.Headers = make(map[string]string) + } + if config.QuitChan == nil { + config.QuitChan = make(chan bool) + } + // 50 is nice value + if _, exist := config.Headers["X-List-Limit"]; !exist { + config.Headers["X-List-Limit"] = "50" + } + + if config.DescOrder { + config.Headers["X-List-Order"] = "desc" + } + + config.Headers["X-UpYun-Folder"] = "true" + config.Headers["Accept"] = "application/json" + + // 1st level + if config.level == 0 { + defer close(config.ObjectsChan) + } + + for { + resp, err := up.doRESTRequest(&restReqConfig{ + method: "GET", + uri: config.Path, + headers: config.Headers, + }) + + if err != nil { + var nerr net.Error + if ok := errors.As(err, &nerr); ok { + config.try++ + if config.MaxListTries == 0 || config.try < config.MaxListTries { + time.Sleep(10 * time.Millisecond) + continue + } + } + return errorOperation("list", err) + } + + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return errorOperation("list read body", err) + } + + iter, files, err := parseBodyToFileInfos(b) + if err != nil { + return errorOperation("list read body", err) + } + for _, fInfo := range files { + if fInfo.IsDir && (config.level+1 < config.MaxListLevel || config.MaxListLevel == -1) { + rConfig := &GetObjectsConfig{ + Path: path.Join(config.Path, fInfo.Name), + QuitChan: config.QuitChan, + ObjectsChan: config.ObjectsChan, + MaxListTries: config.MaxListTries, + MaxListObjects: config.MaxListObjects, + DescOrder: config.DescOrder, + MaxListLevel: config.MaxListLevel, + level: config.level + 1, + rootDir: path.Join(config.rootDir, fInfo.Name), + try: config.try, + objNum: config.objNum, + } + if err = up.List(rConfig); err != nil { + return err + } + // empty folder + if config.objNum == rConfig.objNum { + fInfo.IsEmptyDir = true + } + config.try, config.objNum = rConfig.try, rConfig.objNum + } + if config.rootDir != "" { + fInfo.Name = path.Join(config.rootDir, fInfo.Name) + } + + select { + case <-config.QuitChan: + return nil + default: + config.ObjectsChan <- fInfo + } + + config.objNum++ + if config.MaxListObjects > 0 && config.objNum >= config.MaxListObjects { + return nil + } + } 
+ + if iter == "g2gCZAAEbmV4dGQAA2VvZg" { + return nil + } + config.Headers["X-List-Iter"] = iter + } +} + +func (up *UpYun) ModifyMetadata(config *ModifyMetadataConfig) error { + if config.Operation == "" { + config.Operation = "merge" + } + _, err := up.doRESTRequest(&restReqConfig{ + method: "PATCH", + uri: config.Path, + query: "metadata=" + config.Operation, + headers: config.Headers, + closeBody: true, + }) + if err != nil { + return errorOperation("modify metadata", err) + } + return nil +} + +func (up *UpYun) doRESTRequest(config *restReqConfig) (*http.Response, error) { + escUri := path.Join("/", up.Bucket, escapeUri(config.uri)) + if strings.HasSuffix(config.uri, "/") { + escUri += "/" + } + if config.query != "" { + escUri += "?" + config.query + } + + headers := map[string]string{} + hasMD5 := false + for k, v := range config.headers { + if strings.ToLower(k) == "content-md5" && v != "" { + hasMD5 = true + } + headers[k] = v + } + + headers["Date"] = makeRFC1123Date(time.Now()) + headers["Host"] = "v0.api.upyun.com" + + if !hasMD5 && config.useMD5 { + switch v := config.httpBody.(type) { + case *os.File: + headers["Content-MD5"], _ = md5File(v) + case UpYunPutReader: + headers["Content-MD5"] = v.MD5() + } + } + + if up.deprecated { + if _, ok := headers["Content-Length"]; !ok { + size := int64(0) + switch v := config.httpBody.(type) { + case *os.File: + if fInfo, err := v.Stat(); err == nil { + size = fInfo.Size() + } + case UpYunPutReader: + size = int64(v.Len()) + } + headers["Content-Length"] = fmt.Sprint(size) + } + headers["Authorization"] = up.MakeRESTAuth(&RESTAuthConfig{ + Method: config.method, + Uri: escUri, + DateStr: headers["Date"], + LengthStr: headers["Content-Length"], + }) + } else { + headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{ + Method: config.method, + Uri: escUri, + DateStr: headers["Date"], + ContentMD5: headers["Content-MD5"], + }) + } + + endpoint := up.doGetEndpoint("v0.api.upyun.com") + url := fmt.Sprintf("http://%s%s", endpoint, escUri) + + resp, err := up.doHTTPRequest(config.method, url, headers, config.httpBody) + if err != nil { + return nil, err + } + + if config.closeBody { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + + return resp, nil +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/upyun.go b/vendor/github.com/upyun/go-sdk/v3/upyun/upyun.go new file mode 100644 index 0000000..b63e123 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/upyun.go @@ -0,0 +1,61 @@ +package upyun + +import ( + "net" + "net/http" + "time" +) + +const ( + version = "3.0.1" + + defaultChunkSize = 32 * 1024 + defaultConnectTimeout = time.Second * 60 +) + +type UpYunConfig struct { + Bucket string + Operator string + Password string + Secret string // deprecated + Hosts map[string]string + UserAgent string +} + +type UpYun struct { + UpYunConfig + httpc *http.Client + deprecated bool +} + +func NewUpYun(config *UpYunConfig) *UpYun { + up := &UpYun{} + up.Bucket = config.Bucket + up.Operator = config.Operator + up.Password = md5Str(config.Password) + up.Secret = config.Secret + up.Hosts = config.Hosts + if config.UserAgent != "" { + up.UserAgent = config.UserAgent + } else { + up.UserAgent = makeUserAgent(version) + } + + up.httpc = &http.Client{ + Transport: &http.Transport{ + Dial: func(network, addr string) (c net.Conn, err error) { + return net.DialTimeout(network, addr, defaultConnectTimeout) + }, + }, + } + + return up +} + +func (up *UpYun) SetHTTPClient(httpc *http.Client) { + up.httpc = httpc +} + +func (up 
*UpYun) UseDeprecatedApi() { + up.deprecated = true +} diff --git a/vendor/github.com/upyun/go-sdk/v3/upyun/utils.go b/vendor/github.com/upyun/go-sdk/v3/upyun/utils.go new file mode 100644 index 0000000..479b408 --- /dev/null +++ b/vendor/github.com/upyun/go-sdk/v3/upyun/utils.go @@ -0,0 +1,234 @@ +package upyun + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +var ( + escape []uint32 = []uint32{ + 0xffffffff, /* 1111 1111 1111 1111 1111 1111 1111 1111 */ + + /* ?>=< ;:98 7654 3210 /.-, +*)( '&%$ #"! */ + 0xfc001fff, /* 1111 1100 0000 0000 0001 1111 1111 1111 */ + + /* _^]\ [ZYX WVUT SRQP ONML KJIH GFED CBA@ */ + 0x78000001, /* 0111 1000 0000 0000 0000 0000 0000 0001 */ + + /* ~}| {zyx wvut srqp onml kjih gfed cba` */ + 0xb8000001, /* 1011 1000 0000 0000 0000 0000 0000 0001 */ + + 0xffffffff, /* 1111 1111 1111 1111 1111 1111 1111 1111 */ + 0xffffffff, /* 1111 1111 1111 1111 1111 1111 1111 1111 */ + 0xffffffff, /* 1111 1111 1111 1111 1111 1111 1111 1111 */ + 0xffffffff, /* 1111 1111 1111 1111 1111 1111 1111 1111 */ + } + hexMap = "0123456789ABCDEF" +) + +func makeRFC1123Date(d time.Time) string { + utc := d.UTC().Format(time.RFC1123) + return strings.ReplaceAll(utc, "UTC", "GMT") +} + +func makeUserAgent(version string) string { + return fmt.Sprintf("UPYUN Go SDK V2/%s", version) +} + +func md5Str(s string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(s))) +} + +func base64ToStr(b []byte) string { + return base64.StdEncoding.EncodeToString(b) +} + +func hmacSha1(key string, data []byte) []byte { + hm := hmac.New(sha1.New, []byte(key)) + hm.Write(data) + return hm.Sum(nil) +} + +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +func escapeUri(s string) string { + size := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if escape[c>>5]&(1<<(c&0x1f)) > 0 { + size += 3 + } else { + size++ + } + } + + ret := make([]byte, size) + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if escape[c>>5]&(1<<(c&0x1f)) > 0 { + ret[j] = '%' + ret[j+1] = hexMap[c>>4] + ret[j+2] = hexMap[c&0xf] + j += 3 + } else { + ret[j] = c + j += 1 + } + } + return string(ret) +} + +func unescapeUri(s string) string { + n := 0 + for i := 0; i < len(s); n++ { + switch s[i] { + case '%': + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + // if not correct, return original string + return s + } + i += 3 + default: + i++ + } + } + + t := make([]byte, n) + j := 0 + for i := 0; i < len(s); j++ { + switch s[i] { + case '%': + t[j] = unhex(s[i+1])<<4 | unhex(s[i+2]) + i += 3 + default: + t[j] = s[i] + i++ + } + } + return string(t) +} + +var readHTTPBody = ioutil.ReadAll + +func readHTTPBodyToStr(resp *http.Response) (string, error) { + b, err := readHTTPBody(resp.Body) + resp.Body.Close() + if err != nil { + return "", errorOperation("read body", err) + } + return string(b), nil +} + +func addQueryToUri(rawurl string, kwargs map[string]string) string { + u, _ := url.ParseRequestURI(rawurl) + q := u.Query() + for k, v := range kwargs { + q.Add(k, v) + } + u.RawQuery = q.Encode() + return u.String() +} + +func encodeQueryToPayload(kwargs 
map[string]string) string { + payload := url.Values{} + for k, v := range kwargs { + payload.Set(k, v) + } + return payload.Encode() +} + +func readHTTPBodyToInt(resp *http.Response) (int64, error) { + b, err := readHTTPBody(resp.Body) + resp.Body.Close() + if err != nil { + return 0, errorOperation("read body", err) + } + + n, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return 0, errorOperation("parse int", err) + } + return n, nil +} + +func parseStrToInt(s string) int64 { + n, _ := strconv.ParseInt(s, 10, 64) + return n +} + +func md5File(f io.ReadSeeker) (string, error) { + offset, _ := f.Seek(0, 0) + defer f.Seek(offset, 0) + hash := md5.New() + if _, err := io.Copy(hash, f); err != nil { + return "", err + } + return fmt.Sprintf("%x", hash.Sum(nil)), nil +} + +type JsonFileInfo struct { + ContentType string `json:"type"` + Name string `json:"name"` + Length int64 `json:"length"` + LastModified int64 `json:"last_modified"` +} +type JsonFiles struct { + Files []*JsonFileInfo `json:"files"` + Iter string `json:"iter"` +} + +func parseBodyToFileInfos(b []byte) (iter string, fInfos []*FileInfo, err error) { + files := &JsonFiles{} + err = json.Unmarshal(b, files) + if err != nil { + return + } + + iter = files.Iter + fInfos = make([]*FileInfo, len(files.Files)) + + for i, f := range files.Files { + fInfos[i] = &FileInfo{ + Name: f.Name, + IsDir: f.ContentType == "folder", + ContentType: f.ContentType, + Size: f.Length, + Time: time.Unix(f.LastModified, 0), + } + } + return +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000..690eb85 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,212 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. 
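+//
+// Editor's note: a minimal usage sketch; the key and loadConfig are
+// illustrative, not part of this package:
+//
+//	var g Group
+//	v, err, shared := g.Do("config", func() (interface{}, error) {
+//		return loadConfig() // concurrent callers for "config" share this one call
+//	})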
+type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. 
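+// For example (editor's note), g.Forget("config") makes the next
+// Do("config", fn) invoke fn again instead of sharing an in-flight result.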
+func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
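The singleflight package vendored above suppresses duplicate concurrent calls: every goroutine that calls Do with the same key while one execution is in flight blocks until that execution finishes, then shares its result. For orientation, the following is a minimal, illustrative sketch of how the vendored API is typically consumed; it is not part of the diff, and the helper expensiveLookup and the key "user:42" are hypothetical names chosen for the example.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

// expensiveLookup stands in for a slow backend call (e.g. an object-storage
// HEAD request). With singleflight it runs once per key, not once per caller.
func expensiveLookup(key string) (interface{}, error) {
	fmt.Println("executing lookup for", key)
	return "value-for-" + key, nil
}

func main() {
	var g singleflight.Group // zero value is ready to use
	var wg sync.WaitGroup

	// Ten concurrent callers, one in-flight execution: duplicates block in
	// Do until the first call completes, then all receive the same (v, err),
	// and shared reports that the result was handed to multiple callers.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, shared := g.Do("user:42", func() (interface{}, error) {
				return expensiveLookup("user:42")
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}

DoChan follows the same pattern but immediately returns a buffered channel of Result, which is the variant to reach for when the caller also needs to select on a context or timeout instead of blocking in Do.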
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 0000000..12e813c --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,419 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. 
+func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. 
+// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing + select { + case <-ch: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
+func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } else if lim.limit == 0 { + var ok bool + if lim.burst >= n { + ok = true + lim.burst -= n + } + return Reservation{ + ok: ok, + lim: lim, + tokens: lim.burst, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +// advance requires that lim.mu is held. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Calculate the new number of tokens, due to time that passed. + elapsed := now.Sub(last) + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + if limit <= 0 { + return InfDuration + } + seconds := tokens / float64(limit) + return time.Duration(float64(time.Second) * seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. 
+func (limit Limit) tokensFromDuration(d time.Duration) float64 { + if limit <= 0 { + return 0 + } + return d.Seconds() * float64(limit) +} diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..4a42caa --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,125 @@ +# github.com/aliyun/aliyun-oss-go-sdk v2.2.4+incompatible +## explicit +github.com/aliyun/aliyun-oss-go-sdk/oss +# github.com/aws/aws-sdk-go v1.44.66 +## explicit; go 1.11 +github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn +github.com/aws/aws-sdk-go/aws/awserr +github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/client +github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/ssocreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/csm +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3shared +github.com/aws/aws-sdk-go/internal/s3shared/arn +github.com/aws/aws-sdk-go/internal/s3shared/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings +github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum +github.com/aws/aws-sdk-go/private/protocol +github.com/aws/aws-sdk-go/private/protocol/eventstream +github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restjson +github.com/aws/aws-sdk-go/private/protocol/restxml +github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/sso +github.com/aws/aws-sdk-go/service/sso/ssoiface +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/baidubce/bce-sdk-go v0.9.131 +## explicit; go 1.11 +github.com/baidubce/bce-sdk-go/auth +github.com/baidubce/bce-sdk-go/bce +github.com/baidubce/bce-sdk-go/http +github.com/baidubce/bce-sdk-go/services/bos +github.com/baidubce/bce-sdk-go/services/bos/api +github.com/baidubce/bce-sdk-go/util +github.com/baidubce/bce-sdk-go/util/log +# github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f +## explicit +# github.com/clbanning/mxj v1.8.4 +## explicit +github.com/clbanning/mxj +# github.com/deckarep/golang-set v1.8.0 +## explicit; go 1.17 +github.com/deckarep/golang-set +# github.com/google/go-querystring v1.1.0 +## explicit; go 1.10 +github.com/google/go-querystring/query +# github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible +## explicit 
+github.com/huaweicloud/huaweicloud-sdk-go-obs/obs +# github.com/jmespath/go-jmespath v0.4.0 +## explicit; go 1.14 +github.com/jmespath/go-jmespath +# github.com/ks3sdklib/aws-sdk-go v1.1.6 +## explicit; go 1.16 +github.com/ks3sdklib/aws-sdk-go/aws +github.com/ks3sdklib/aws-sdk-go/aws/awserr +github.com/ks3sdklib/aws-sdk-go/aws/awsutil +github.com/ks3sdklib/aws-sdk-go/aws/credentials +github.com/ks3sdklib/aws-sdk-go/internal/apierr +github.com/ks3sdklib/aws-sdk-go/internal/endpoints +github.com/ks3sdklib/aws-sdk-go/internal/protocol/query +github.com/ks3sdklib/aws-sdk-go/internal/protocol/query/queryutil +github.com/ks3sdklib/aws-sdk-go/internal/protocol/rest +github.com/ks3sdklib/aws-sdk-go/internal/protocol/restxml +github.com/ks3sdklib/aws-sdk-go/internal/protocol/xml/xmlutil +github.com/ks3sdklib/aws-sdk-go/internal/signer/v2 +github.com/ks3sdklib/aws-sdk-go/service/s3 +# github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure +# github.com/mozillazg/go-httpheader v0.3.1 +## explicit; go 1.11 +github.com/mozillazg/go-httpheader +# github.com/qiniu/go-sdk/v7 v7.13.0 +## explicit; go 1.14 +github.com/qiniu/go-sdk/v7 +github.com/qiniu/go-sdk/v7/auth +github.com/qiniu/go-sdk/v7/auth/qbox +github.com/qiniu/go-sdk/v7/client +github.com/qiniu/go-sdk/v7/conf +github.com/qiniu/go-sdk/v7/internal/log +github.com/qiniu/go-sdk/v7/reqid +github.com/qiniu/go-sdk/v7/storage +# github.com/satori/go.uuid v1.2.0 +## explicit +# github.com/tencentyun/cos-go-sdk-v5 v0.7.36 +## explicit; go 1.12 +github.com/tencentyun/cos-go-sdk-v5 +# github.com/upyun/go-sdk/v3 v3.0.3 +## explicit; go 1.13 +github.com/upyun/go-sdk/v3/upyun +# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 +## explicit +golang.org/x/sync/singleflight +# golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 +## explicit +golang.org/x/time/rate
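The vendored golang.org/x/time/rate package implements the token bucket described in its doc comment: NewLimiter(r, b) starts full with b tokens, refills at r tokens per second, and Wait blocks a caller until a token is available or its context ends. The sketch below is illustrative only and not part of the diff; the chosen rate (one token every 500ms, i.e. 2 events/second), the burst of 4, and the loop bounds are arbitrary example values.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 2 events per second (one token every 500ms), bursts of up to 4.
	lim := rate.NewLimiter(rate.Every(500*time.Millisecond), 4)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	for i := 0; i < 8; i++ {
		// Wait consumes one token, blocking until it is available or the
		// context is done. The initial burst of 4 proceeds immediately;
		// the remaining iterations are paced at the refill rate.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("wait aborted:", err)
			return
		}
		fmt.Println("event", i, "at", time.Now().Format("15:04:05.000"))
	}

	// Allow is the non-blocking variant: it consumes a token and returns
	// true only if one is available right now.
	if lim.Allow() {
		fmt.Println("extra event permitted without waiting")
	}
}

Reserve sits between the two: it always returns a Reservation whose Delay reports how long the caller must sleep before acting, and which can be handed back via Cancel, as the CancelAt implementation in the vendored source above shows.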