master v1.0.37
李光春 2 years ago
parent d1cda7f385
commit 04c6685b75

3  .gitignore vendored

@@ -5,4 +5,5 @@
.vscode
*.log
gomod.sh
*_test.go
*_test.go
/vendor/

@@ -21,7 +21,7 @@ type ClientConfig struct {
MessageToken string
MessageKey string
RedisClient *dorm.RedisClient // cache database
apiGormClientFun golog.ApiClientFun // log configuration
ApiGormClientFun golog.ApiClientFun // log configuration
Debug bool // log switch
ZapLog *golog.ZapLog // log service
RedisCachePrefixFun redisCachePrefixFun // cache prefix
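The point of this hunk is that renaming apiGormClientFun to ApiGormClientFun exports the field, so the log client factory can now be set from outside the package. A minimal caller-side sketch (import paths elided; redisClient and apiClientFun are assumed to be created elsewhere):

// Hypothetical caller-side wiring; only the field names come from the struct above.
func newWechatOpenClient(redisClient *dorm.RedisClient, apiClientFun golog.ApiClientFun) (*wechatopen.Client, error) {
	return wechatopen.NewClient(&wechatopen.ClientConfig{
		MessageToken:     "token", // illustrative values
		MessageKey:       "key",
		RedisClient:      redisClient,
		ApiGormClientFun: apiClientFun, // settable now that the field is exported
		Debug:            true,
	})
}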
@@ -74,7 +74,7 @@ func NewClient(config *ClientConfig) (*Client, error) {
c.requestClient = gorequest.NewHttp()
apiGormClient := config.apiGormClientFun()
apiGormClient := config.ApiGormClientFun()
if apiGormClient != nil {
c.log.client = apiGormClient
c.log.gorm = true

@@ -3,5 +3,5 @@ package wechatopen
const (
apiUrl = "https://api.weixin.qq.com"
LogTable = "wechatopen"
Version = "1.0.36"
Version = "1.0.37"
)

@@ -120,7 +120,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gorm.io/datatypes v1.0.7 // indirect
gorm.io/driver/mysql v1.3.6 // indirect
gorm.io/driver/postgres v1.3.9 // indirect
gorm.io/driver/postgres v1.3.10 // indirect
gorm.io/gorm v1.23.9 // indirect
mellium.im/sasl v0.3.0 // indirect
xorm.io/builder v0.3.12 // indirect

@@ -260,7 +260,6 @@ github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=
github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
@@ -280,7 +279,6 @@ github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=
github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
@@ -296,7 +294,6 @@ github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI
github.com/jackc/pgtype v1.8.0/go.mod h1:PqDKcEBtllAtk/2p6z6SHdXW5UB+MhE75tUol2OKexE=
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=
github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
@@ -309,7 +306,6 @@ github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CI
github.com/jackc/pgx/v4 v4.12.0/go.mod h1:fE547h6VulLPA3kySjfnSG/e2D861g/50JlVUa/ub60=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw=
github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ=
github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E=
github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
@@ -841,8 +837,8 @@ gorm.io/driver/mysql v1.3.2/go.mod h1:ChK6AHbHgDCFZyJp0F+BmVGb06PSIoh9uVYKAlRbb2
gorm.io/driver/mysql v1.3.6 h1:BhX1Y/RyALb+T9bZ3t07wLnPZBukt+IRkMn8UZSNbGM=
gorm.io/driver/mysql v1.3.6/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c=
gorm.io/driver/postgres v1.3.4/go.mod h1:y0vEuInFKJtijuSGu9e5bs5hzzSzPK+LancpKpvbRBw=
gorm.io/driver/postgres v1.3.9 h1:lWGiVt5CijhQAg0PWB7Od1RNcBw/jS4d2cAScBcSDXg=
gorm.io/driver/postgres v1.3.9/go.mod h1:qw/FeqjxmYqW5dBcYNBsnhQULIApQdk7YuuDPktVi1U=
gorm.io/driver/postgres v1.3.10 h1:Fsd+pQpFMGlGxxVMUPJhNo8gG8B1lKtk8QQ4/VZZAJw=
gorm.io/driver/postgres v1.3.10/go.mod h1:whNfh5WhhHs96honoLjBAMwJGYEuA3m1hvgUbNXhPCw=
gorm.io/driver/sqlite v1.3.1 h1:bwfE+zTEWklBYoEodIOIBwuWHpnx52Z9zJFW5F33WLk=
gorm.io/driver/sqlite v1.3.1/go.mod h1:wJx0hJspfycZ6myN38x1O/AqLtNS6c5o9TndewFbELg=
gorm.io/driver/sqlserver v1.3.1 h1:F5t6ScMzOgy1zukRTIZgLZwKahgt3q1woAILVolKpOI=

@@ -1,14 +0,0 @@
Copyright (c) 2015 aliyun.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -1,345 +0,0 @@
package oss
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
)
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
type headerSorter struct {
Keys []string
Vals []string
}
// getAdditionalHeaderKeys returns the configured additional header keys that exist in the HTTP request header
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
var keysList []string
keysMap := make(map[string]string)
srcKeys := make(map[string]string)
for k := range req.Header {
srcKeys[strings.ToLower(k)] = ""
}
for _, v := range conn.config.AdditionalHeaders {
if _, ok := srcKeys[strings.ToLower(v)]; ok {
keysMap[strings.ToLower(v)] = ""
}
}
for k := range keysMap {
keysList = append(keysList, k)
}
sort.Strings(keysList)
return keysList, keysMap
}
// getAdditionalHeaderKeysV4 returns the configured additional header keys that exist in the HTTP request header, excluding Content-MD5 and Content-Type
func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
var keysList []string
keysMap := make(map[string]string)
srcKeys := make(map[string]string)
for k := range req.Header {
srcKeys[strings.ToLower(k)] = ""
}
for _, v := range conn.config.AdditionalHeaders {
if _, ok := srcKeys[strings.ToLower(v)]; ok {
if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
keysMap[strings.ToLower(v)] = ""
}
}
}
for k := range keysMap {
keysList = append(keysList, k)
}
sort.Strings(keysList)
return keysList, keysMap
}
// signHeader signs the header and sets it as the authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
akIf := conn.config.GetCredentials()
authorizationStr := ""
if conn.config.AuthVersion == AuthV4 {
strDay := ""
strDate := req.Header.Get(HttpHeaderOssDate)
if strDate == "" {
strDate = req.Header.Get(HTTPHeaderDate)
t, _ := time.Parse(http.TimeFormat, strDate)
strDay = t.Format("20060102")
} else {
t, _ := time.Parse(iso8601DateFormatSecond, strDate)
strDay = t.Format("20060102")
}
signHeaderProduct := conn.config.GetSignProduct()
signHeaderRegion := conn.config.GetSignRegion()
additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
if len(additionalList) > 0 {
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
additionalHeadersStr := strings.Join(additionalList, ";")
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
} else {
authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
}
} else if conn.config.AuthVersion == AuthV2 {
additionalList, _ := conn.getAdditionalHeaderKeys(req)
if len(additionalList) > 0 {
authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
additionalHeadersStr := strings.Join(additionalList, ";")
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
} else {
authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
}
} else {
// Get the final authorization string
authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
}
// Give the parameter "Authorization" value
req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
// Find out the "x-oss-"'s address in header of the request
ossHeadersMap := make(map[string]string)
additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
for k, v := range req.Header {
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
ossHeadersMap[strings.ToLower(k)] = v[0]
} else if conn.config.AuthVersion == AuthV2 {
if _, ok := additionalMap[strings.ToLower(k)]; ok {
ossHeadersMap[strings.ToLower(k)] = v[0]
}
}
}
hs := newHeaderSorter(ossHeadersMap)
// Sort the ossHeadersMap in ascending key order
hs.Sort()
// Get the canonicalizedOSSHeaders
canonicalizedOSSHeaders := ""
for i := range hs.Keys {
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
}
// Give other parameters values
// when sign URL, date is expires
date := req.Header.Get(HTTPHeaderDate)
contentType := req.Header.Get(HTTPHeaderContentType)
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
// default is v1 signature
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
// v2 signature
if conn.config.AuthVersion == AuthV2 {
signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
}
if conn.config.LogLevel >= Debug {
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
}
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
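Stripped of the header canonicalization, the V1 branch above is plain HMAC-SHA1 over the string-to-sign, base64-encoded. A standalone sketch with made-up inputs, reusing the imports already present in this file:

// Standalone illustration of the V1 signing step; all inputs are made up.
func signV1Demo() string {
	signStr := "GET\n\n\nWed, 21 Oct 2015 07:28:00 GMT\nx-oss-meta-a:b\n/bucket/object"
	mac := hmac.New(sha1.New, []byte("yourAccessKeySecret")) // HMAC-SHA1, as above
	io.WriteString(mac, signStr)
	return "OSS yourAccessKeyID:" + base64.StdEncoding.EncodeToString(mac.Sum(nil))
}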
func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string) string {
// Find out the "x-oss-"'s address in header of the request
ossHeadersMap := make(map[string]string)
additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
for k, v := range req.Header {
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
} else {
if _, ok := additionalMap[strings.ToLower(k)]; ok {
ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
}
}
}
// Required parameters
signDate := ""
dateFormat := ""
date := req.Header.Get(HTTPHeaderDate)
if date != "" {
signDate = date
dateFormat = http.TimeFormat
}
ossDate := req.Header.Get(HttpHeaderOssDate)
_, ok := ossHeadersMap[strings.ToLower(HttpHeaderOssDate)]
if ossDate != "" {
signDate = ossDate
dateFormat = iso8601DateFormatSecond
if !ok {
ossHeadersMap[strings.ToLower(HttpHeaderOssDate)] = strings.Trim(ossDate, " ")
}
}
contentType := req.Header.Get(HTTPHeaderContentType)
_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentType)]
if contentType != "" && !ok {
ossHeadersMap[strings.ToLower(HTTPHeaderContentType)] = strings.Trim(contentType, " ")
}
contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)]
if contentMd5 != "" && !ok {
ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)] = strings.Trim(contentMd5, " ")
}
hs := newHeaderSorter(ossHeadersMap)
// Sort the ossHeadersMap in ascending key order
hs.Sort()
// Get the canonicalizedOSSHeaders
canonicalizedOSSHeaders := ""
for i := range hs.Keys {
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
}
signStr := ""
// v4 signature
hashedPayload := req.Header.Get(HttpHeaderOssContentSha256)
// subResource
resource := canonicalizedResource
subResource := ""
subPos := strings.LastIndex(canonicalizedResource, "?")
if subPos != -1 {
subResource = canonicalizedResource[subPos+1:]
resource = canonicalizedResource[0:subPos]
}
// get canonical request
canonicalRequest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
rh := sha256.New()
io.WriteString(rh, canonicalRequest)
hashedRequest := hex.EncodeToString(rh.Sum(nil))
if conn.config.LogLevel >= Debug {
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(canonicalRequest))
}
// get day, e.g. 20210914
t, _ := time.Parse(dateFormat, signDate)
strDay := t.Format("20060102")
signedStrV4Product := conn.config.GetSignProduct()
signedStrV4Region := conn.config.GetSignRegion()
signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
if conn.config.LogLevel >= Debug {
conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
}
h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
io.WriteString(h1, strDay)
h1Key := h1.Sum(nil)
h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
io.WriteString(h2, signedStrV4Region)
h2Key := h2.Sum(nil)
h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
io.WriteString(h3, signedStrV4Product)
h3Key := h3.Sum(nil)
h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
io.WriteString(h4, "aliyun_v4_request")
h4Key := h4.Sum(nil)
h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
io.WriteString(h, signStr)
return fmt.Sprintf("%x", h.Sum(nil))
}
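The chained HMACs at the end of getSignedStrV4 derive the V4 signing key; the same cascade, factored into a helper for readability (an illustrative refactor, not code from the SDK):

// deriveV4Key mirrors the h1..h4 chain above: each HMAC-SHA256 output keys the next.
func deriveV4Key(secret, day, region, product string) []byte {
	sign := func(key []byte, msg string) []byte {
		h := hmac.New(sha256.New, key)
		io.WriteString(h, msg)
		return h.Sum(nil)
	}
	k := sign([]byte("aliyun_v4"+secret), day)
	k = sign(k, region)
	k = sign(k, product)
	return sign(k, "aliyun_v4_request") // final key that signs signStr
}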
func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
if params[HTTPParamAccessKeyID] == nil {
return ""
}
canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
canonParamsKeys := []string{}
for key := range params {
if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
canonParamsKeys = append(canonParamsKeys, key)
}
}
sort.Strings(canonParamsKeys)
canonParamsStr := ""
for _, key := range canonParamsKeys {
canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
}
expireStr := strconv.FormatInt(expiration, 10)
signStr := expireStr + "\n" + canonParamsStr + canonResource
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
// newHeaderSorter is a helper for signHeader; it snapshots the map into parallel key/value slices.
func newHeaderSorter(m map[string]string) *headerSorter {
hs := &headerSorter{
Keys: make([]string, 0, len(m)),
Vals: make([]string, 0, len(m)),
}
for k, v := range m {
hs.Keys = append(hs.Keys, k)
hs.Vals = append(hs.Vals, v)
}
return hs
}
// Sort sorts the headers in ascending key order for signHeader.
func (hs *headerSorter) Sort() {
sort.Sort(hs)
}
// Len returns the number of stored headers.
func (hs *headerSorter) Len() int {
return len(hs.Vals)
}
// Less reports whether the key at index i sorts before the key at index j.
func (hs *headerSorter) Less(i, j int) bool {
return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}
// Swap exchanges the key/value pairs at indexes i and j.
func (hs *headerSorter) Swap(i, j int) {
hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}
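Because headerSorter implements sort.Interface over parallel slices, Sort keeps each value attached to its key; a tiny package-internal illustration (headerSorter is unexported, so this would live in package oss, e.g. in a test):

func exampleHeaderSorterOrder() {
	hs := newHeaderSorter(map[string]string{"x-oss-meta-b": "2", "x-oss-meta-a": "1"})
	hs.Sort()
	fmt.Println(hs.Keys, hs.Vals) // [x-oss-meta-a x-oss-meta-b] [1 2]
}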

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,229 +0,0 @@
package oss
import (
"bytes"
"fmt"
"log"
"net"
"os"
"time"
)
// Define the level of the output log
const (
LogOff = iota
Error
Warn
Info
Debug
)
// LogTag tags for each log level
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
// HTTPTimeout defines HTTP timeout.
type HTTPTimeout struct {
ConnectTimeout time.Duration
ReadWriteTimeout time.Duration
HeaderTimeout time.Duration
LongTimeout time.Duration
IdleConnTimeout time.Duration
}
// HTTPMaxConns defines max idle connections and max idle connections per host
type HTTPMaxConns struct {
MaxIdleConns int
MaxIdleConnsPerHost int
MaxConnsPerHost int
}
// Credentials is the interface for getting AccessKeyID, AccessKeySecret, and SecurityToken
type Credentials interface {
GetAccessKeyID() string
GetAccessKeySecret() string
GetSecurityToken() string
}
// CredentialsProvider is the interface for getting Credentials
type CredentialsProvider interface {
GetCredentials() Credentials
}
type defaultCredentials struct {
config *Config
}
func (defCre *defaultCredentials) GetAccessKeyID() string {
return defCre.config.AccessKeyID
}
func (defCre *defaultCredentials) GetAccessKeySecret() string {
return defCre.config.AccessKeySecret
}
func (defCre *defaultCredentials) GetSecurityToken() string {
return defCre.config.SecurityToken
}
type defaultCredentialsProvider struct {
config *Config
}
func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
return &defaultCredentials{config: defBuild.config}
}
// Config defines oss configuration
type Config struct {
Endpoint string // OSS endpoint
AccessKeyID string // AccessId
AccessKeySecret string // AccessKey
RetryTimes uint // Retry count by default it's 5.
UserAgent string // SDK name/version/system information
IsDebug bool // Enable debug mode. Default is false.
Timeout uint // Timeout in seconds. By default it's 60.
SecurityToken string // STS Token
IsCname bool // If cname is in the endpoint.
HTTPTimeout HTTPTimeout // HTTP timeout
HTTPMaxConns HTTPMaxConns // Http max connections
IsUseProxy bool // Flag of using proxy.
ProxyHost string // Flag of using proxy host.
IsAuthProxy bool // Flag of needing authentication.
ProxyUser string // Proxy user
ProxyPassword string // Proxy password
IsEnableMD5 bool // Flag of enabling MD5 for upload.
MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
IsEnableCRC bool // Flag of enabling CRC for upload.
LogLevel int // Log level
Logger *log.Logger // For write log
UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited
UploadLimiter *OssLimiter // Bandwidth limit reader for upload
DownloadLimitSpeed int // Download limit speed:KB/s, 0 is unlimited
DownloadLimiter *OssLimiter // Bandwidth limit reader for download
CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
LocalAddr net.Addr // local client host info
UserSetUa bool // UserAgent is set by user or not
AuthVersion AuthVersionType // v1 or v2, v4 signature,default is v1
AdditionalHeaders []string // special http headers needed to be sign
RedirectEnabled bool // only effective from go1.7 onward, enable http redirect or not
InsecureSkipVerify bool // for https, Whether to skip verifying the server certificate file
Region string // such as cn-hangzhou
CloudBoxId string //
Product string // oss or oss-cloudbox, default is oss
}
// LimitUploadSpeed sets the upload speed limit in KB/s; 0 means unlimited (the default)
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
if uploadSpeed < 0 {
return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
} else if uploadSpeed == 0 {
config.UploadLimitSpeed = 0
config.UploadLimiter = nil
return nil
}
var err error
config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
if err == nil {
config.UploadLimitSpeed = uploadSpeed
}
return err
}
// LimitDownloadSpeed sets the download speed limit in KB/s; 0 means unlimited (the default)
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
if downloadSpeed < 0 {
return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
} else if downloadSpeed == 0 {
config.DownloadLimitSpeed = 0
config.DownloadLimiter = nil
return nil
}
var err error
config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
if err == nil {
config.DownloadLimitSpeed = downloadSpeed
}
return err
}
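Both limiters follow the same contract: negative values error, zero clears the limit. A short package-internal usage sketch (getDefaultOssConfig is unexported), with example values:

func exampleLimiters() {
	cfg := getDefaultOssConfig()
	if err := cfg.LimitUploadSpeed(512); err != nil { // cap uploads at 512 KB/s
		fmt.Println(err)
	}
	_ = cfg.LimitDownloadSpeed(0) // 0 clears the download limit
}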
// WriteLog writes a formatted log entry at the given level
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
if config.LogLevel < LogLevel || config.Logger == nil {
return
}
var logBuffer bytes.Buffer
logBuffer.WriteString(LogTag[LogLevel-1])
logBuffer.WriteString(fmt.Sprintf(format, a...))
config.Logger.Printf("%s", logBuffer.String())
}
// GetCredentials returns the configured Credentials
func (config *Config) GetCredentials() Credentials {
return config.CredentialsProvider.GetCredentials()
}
// GetSignProduct returns the product name used for signing
func (config *Config) GetSignProduct() string {
if config.CloudBoxId != "" {
return "oss-cloudbox"
}
return "oss"
}
// GetSignRegion returns the region used for signing
func (config *Config) GetSignRegion() string {
if config.CloudBoxId != "" {
return config.CloudBoxId
}
return config.Region
}
// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
config := Config{}
config.Endpoint = ""
config.AccessKeyID = ""
config.AccessKeySecret = ""
config.RetryTimes = 5
config.IsDebug = false
config.UserAgent = userAgent()
config.Timeout = 60 // Seconds
config.SecurityToken = ""
config.IsCname = false
config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
config.HTTPMaxConns.MaxIdleConns = 100
config.HTTPMaxConns.MaxIdleConnsPerHost = 100
config.IsUseProxy = false
config.ProxyHost = ""
config.IsAuthProxy = false
config.ProxyUser = ""
config.ProxyPassword = ""
config.MD5Threshold = 16 * 1024 * 1024 // 16MB
config.IsEnableMD5 = false
config.IsEnableCRC = true
config.LogLevel = LogOff
config.Logger = log.New(os.Stdout, "", log.LstdFlags)
provider := &defaultCredentialsProvider{config: &config}
config.CredentialsProvider = provider
config.AuthVersion = AuthV1
config.RedirectEnabled = true
config.InsecureSkipVerify = false
config.Product = "oss"
return &config
}

@@ -1,917 +0,0 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"time"
)
// Conn defines OSS Conn
type Conn struct {
config *Config
url *urlMaker
client *http.Client
}
var signKeyList = []string{"acl", "uploads", "location", "cors",
"logging", "website", "referer", "lifecycle",
"delete", "append", "tagging", "objectMeta",
"uploadId", "partNumber", "security-token",
"position", "img", "style", "styleName",
"replication", "replicationProgress",
"replicationLocation", "cname", "bucketInfo",
"comp", "qos", "live", "status", "vod",
"startTime", "endTime", "symlink",
"x-oss-process", "response-content-type", "x-oss-traffic-limit",
"response-content-language", "response-expires",
"response-cache-control", "response-content-disposition",
"response-content-encoding", "udf", "udfName", "udfImage",
"udfId", "udfImageDesc", "udfApplication", "comp",
"udfApplicationLog", "restore", "callback", "callback-var", "qosInfo",
"policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment",
"x-oss-request-payer", "sequential",
"inventory", "inventoryId", "continuation-token", "asyncFetch",
"worm", "wormId", "wormExtend", "withHashContext",
"x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256",
"x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration",
"regionList", "cloudboxes", "x-oss-ac-source-ip", "x-oss-ac-subnet-mask", "x-oss-ac-vpc-id", "x-oss-ac-forward-allow",
"metaQuery",
}
// init initializes Conn
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
if client == nil {
// New transport
transport := newTransport(conn, config)
// Proxy
if conn.config.IsUseProxy {
proxyURL, err := url.Parse(config.ProxyHost)
if err != nil {
return err
}
if config.IsAuthProxy {
if config.ProxyPassword != "" {
proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword)
} else {
proxyURL.User = url.User(config.ProxyUser)
}
}
transport.Proxy = http.ProxyURL(proxyURL)
}
client = &http.Client{Transport: transport}
if !config.RedirectEnabled {
disableHTTPRedirect(client)
}
}
conn.config = config
conn.url = urlMaker
conn.client = client
return nil
}
// Do sends request and returns the response
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
urlParams := conn.getURLParams(params)
subResource := conn.getSubResource(params)
uri := conn.url.getURL(bucketName, objectName, urlParams)
resource := ""
if conn.config.AuthVersion != AuthV4 {
resource = conn.getResource(bucketName, objectName, subResource)
} else {
resource = conn.getResourceV4(bucketName, objectName, subResource)
}
return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}
// DoURL sends the request with signed URL and returns the response result.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
// Get URI from signedURL
uri, err := url.ParseRequestURI(signedURL)
if err != nil {
return nil, err
}
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
URL: uri,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: uri.Host,
}
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
defer func() {
fd.Close()
os.Remove(fd.Name())
}()
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderHost, req.Host)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
publishProgress(listener, event)
if conn.config.LogLevel >= Debug {
conn.LoggerHTTPReq(req)
}
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
return nil, err
}
if conn.config.LogLevel >= Debug {
// print out http resp
conn.LoggerHTTPResp(req, resp)
}
// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) getURLParams(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
for k := range params {
keys = append(keys, k)
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(url.QueryEscape(k))
if params[k] != nil && params[k].(string) != "" {
buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1))
}
}
return buf.String()
}
func (conn Conn) getSubResource(params map[string]interface{}) string {
// Sort
keys := make([]string, 0, len(params))
signParams := make(map[string]string)
for k := range params {
if conn.config.AuthVersion == AuthV2 || conn.config.AuthVersion == AuthV4 {
encodedKey := url.QueryEscape(k)
keys = append(keys, encodedKey)
if params[k] != nil && params[k] != "" {
signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)
}
} else if conn.isParamSign(k) {
keys = append(keys, k)
if params[k] != nil {
signParams[k] = params[k].(string)
}
}
}
sort.Strings(keys)
// Serialize
var buf bytes.Buffer
for _, k := range keys {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(k)
if _, ok := signParams[k]; ok {
if signParams[k] != "" {
buf.WriteString("=" + signParams[k])
}
}
}
return buf.String()
}
func (conn Conn) isParamSign(paramKey string) bool {
for _, k := range signKeyList {
if paramKey == k {
return true
}
}
return false
}
// getResource gets canonicalized resource
func (conn Conn) getResource(bucketName, objectName, subResource string) string {
if subResource != "" {
subResource = "?" + subResource
}
if bucketName == "" {
if conn.config.AuthVersion == AuthV2 {
return url.QueryEscape("/") + subResource
}
return fmt.Sprintf("/%s%s", bucketName, subResource)
}
if conn.config.AuthVersion == AuthV2 {
return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource
}
return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}
// getResourceV4 gets canonicalized resource for V4 signing
func (conn Conn) getResourceV4(bucketName, objectName, subResource string) string {
if subResource != "" {
subResource = "?" + subResource
}
if bucketName == "" {
return fmt.Sprintf("/%s", subResource)
}
if objectName != "" {
objectName = url.QueryEscape(objectName)
objectName = strings.Replace(objectName, "+", "%20", -1)
objectName = strings.Replace(objectName, "%2F", "/", -1)
return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}
return fmt.Sprintf("/%s/%s", bucketName, subResource)
}
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
method = strings.ToUpper(method)
req := &http.Request{
Method: method,
URL: uri,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: uri.Host,
}
tracker := &readerTracker{completedBytes: 0}
fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
if fd != nil {
defer func() {
fd.Close()
os.Remove(fd.Name())
}()
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
stNow := time.Now().UTC()
req.Header.Set(HTTPHeaderDate, stNow.Format(http.TimeFormat))
req.Header.Set(HTTPHeaderHost, req.Host)
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if conn.config.AuthVersion == AuthV4 {
req.Header.Set(HttpHeaderOssContentSha256, DefaultContentSha256)
}
akIf := conn.config.GetCredentials()
if akIf.GetSecurityToken() != "" {
req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken())
}
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
conn.signHeader(req, canonicalizedResource)
// Transfer started
event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
publishProgress(listener, event)
if conn.config.LogLevel >= Debug {
conn.LoggerHTTPReq(req)
}
resp, err := conn.client.Do(req)
if err != nil {
// Transfer failed
event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
return nil, err
}
if conn.config.LogLevel >= Debug {
// print out http resp
conn.LoggerHTTPResp(req, resp)
}
// Transfer completed
event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
publishProgress(listener, event)
return conn.handleResponse(resp, crc)
}
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
akIf := conn.config.GetCredentials()
if akIf.GetSecurityToken() != "" {
params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
}
m := strings.ToUpper(string(method))
req := &http.Request{
Method: m,
Header: make(http.Header),
}
if conn.config.IsAuthProxy {
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
req.Header.Set("Proxy-Authorization", basic)
}
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
if headers != nil {
for k, v := range headers {
req.Header.Set(k, v)
}
}
if conn.config.AuthVersion == AuthV2 {
params[HTTPParamSignatureVersion] = "OSS2"
params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10)
params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID
additionalList, _ := conn.getAdditionalHeaderKeys(req)
if len(additionalList) > 0 {
params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";")
}
}
subResource := conn.getSubResource(params)
canonicalizedResource := conn.getResource(bucketName, objectName, subResource)
signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
if conn.config.AuthVersion == AuthV1 {
params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
params[HTTPParamSignature] = signedStr
} else if conn.config.AuthVersion == AuthV2 {
params[HTTPParamSignatureV2] = signedStr
}
urlParams := conn.getURLParams(params)
return conn.url.getSignURL(bucketName, objectName, urlParams)
}
func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string {
params := map[string]interface{}{}
if playlistName != "" {
params[HTTPParamPlaylistName] = playlistName
}
expireStr := strconv.FormatInt(expiration, 10)
params[HTTPParamExpires] = expireStr
akIf := conn.config.GetCredentials()
if akIf.GetAccessKeyID() != "" {
params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
if akIf.GetSecurityToken() != "" {
params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
}
signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
params[HTTPParamSignature] = signedStr
}
urlParams := conn.getURLParams(params)
return conn.url.getSignRtmpURL(bucketName, channelName, urlParams)
}
// handleBody handles request body
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
var file *os.File
var crc hash.Hash64
reader := body
readerLen, err := GetReaderLen(reader)
if err == nil {
req.ContentLength = readerLen
}
req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
// MD5
if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
md5 := ""
reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
req.Header.Set(HTTPHeaderContentMD5, md5)
}
// CRC
if reader != nil && conn.config.IsEnableCRC {
crc = NewCRC(CrcTable(), initCRC)
reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
}
// HTTP body
rc, ok := reader.(io.ReadCloser)
if !ok && reader != nil {
rc = ioutil.NopCloser(reader)
}
if conn.isUploadLimitReq(req) {
limitReader := &LimitSpeedReader{
reader: rc,
ossLimiter: conn.config.UploadLimiter,
}
req.Body = limitReader
} else {
req.Body = rc
}
return file, crc
}
// isUploadLimitReq reports whether the request's upload speed should be limited
func (conn Conn) isUploadLimitReq(req *http.Request) bool {
if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil {
return false
}
if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" {
if req.ContentLength > 0 {
return true
}
}
return false
}
func tryGetFileSize(f *os.File) int64 {
fInfo, _ := f.Stat()
return fInfo.Size()
}
// handleResponse handles response
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
var cliCRC uint64
var srvCRC uint64
statusCode := resp.StatusCode
if statusCode/100 != 2 {
if statusCode >= 400 && statusCode <= 505 {
// 4xx and 5xx indicate that an error occurred during the operation
var respBody []byte
respBody, err := readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
err = ServiceError{
StatusCode: statusCode,
RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
}
} else {
// Response contains storage service error object, unmarshal
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
resp.Header.Get(HTTPHeaderOssRequestID))
if errIn != nil { // error unmarshaling the error response
err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
} else {
err = srvErr
}
}
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
}, err
} else if statusCode >= 300 && statusCode <= 307 {
// OSS uses 3xx, but the response has no body
err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: resp.Body,
}, err
} else {
// (0,300) [308,400) [506,)
// Other extended http StatusCode
var respBody []byte
respBody, err := readResponseBody(resp)
if err != nil {
return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
}
if len(respBody) == 0 {
err = ServiceError{
StatusCode: statusCode,
RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
}
} else {
// Response contains storage service error object, unmarshal
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
resp.Header.Get(HTTPHeaderOssRequestID))
if errIn != nil { // error unmarshaling the error response
err = fmt.Errorf("unkown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
} else {
err = srvErr
}
}
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
}, err
}
} else {
if conn.config.IsEnableCRC && crc != nil {
cliCRC = crc.Sum64()
}
srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
realBody := resp.Body
if conn.isDownloadLimitResponse(resp) {
limitReader := &LimitSpeedReader{
reader: realBody,
ossLimiter: conn.config.DownloadLimiter,
}
realBody = limitReader
}
// 2xx, successful
return &Response{
StatusCode: resp.StatusCode,
Headers: resp.Header,
Body: realBody,
ClientCRC: cliCRC,
ServerCRC: srvCRC,
}, nil
}
}
// isDownloadLimitResponse reports whether the response's download speed should be limited
func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
return false
}
if strings.EqualFold(resp.Request.Method, "GET") {
return true
}
return false
}
// LoggerHTTPReq prints the header information of the HTTP request
func (conn Conn) LoggerHTTPReq(req *http.Request) {
var logBuffer bytes.Buffer
logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path))
logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery))
logBuffer.WriteString(fmt.Sprintf("Header info:"))
for k, v := range req.Header {
var valueBuffer bytes.Buffer
for j := 0; j < len(v); j++ {
if j > 0 {
valueBuffer.WriteString(" ")
}
valueBuffer.WriteString(v[j])
}
logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
}
conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
}
// LoggerHTTPResp prints the headers of the HTTP response
func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
var logBuffer bytes.Buffer
logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
logBuffer.WriteString(fmt.Sprintf("Header info:"))
for k, v := range resp.Header {
var valueBuffer bytes.Buffer
for j := 0; j < len(v); j++ {
if j > 0 {
valueBuffer.WriteString(" ")
}
valueBuffer.WriteString(v[j])
}
logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
}
conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
}
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
if contentLen == 0 || contentLen > md5Threshold {
// Huge body, use temporary file
tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
if tempFile != nil {
io.Copy(tempFile, body)
tempFile.Seek(0, os.SEEK_SET)
md5 := md5.New()
io.Copy(md5, tempFile)
sum := md5.Sum(nil)
b64 = base64.StdEncoding.EncodeToString(sum[:])
tempFile.Seek(0, os.SEEK_SET)
reader = tempFile
}
} else {
// Small body, use memory
buf, _ := ioutil.ReadAll(body)
sum := md5.Sum(buf)
b64 = base64.StdEncoding.EncodeToString(sum[:])
reader = bytes.NewReader(buf)
}
return
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
if err == io.EOF {
err = nil
}
return out, err
}
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
var storageErr ServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err
}
storageErr.StatusCode = statusCode
storageErr.RequestID = requestID
storageErr.RawMessage = string(body)
return storageErr, nil
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
func jsonUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return json.Unmarshal(data, v)
}
// timeoutConn handles HTTP timeout
type timeoutConn struct {
conn net.Conn
timeout time.Duration
longTimeout time.Duration
}
func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
conn.SetReadDeadline(time.Now().Add(longTimeout))
return &timeoutConn{
conn: conn,
timeout: timeout,
longTimeout: longTimeout,
}
}
func (c *timeoutConn) Read(b []byte) (n int, err error) {
c.SetReadDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Read(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Write(b []byte) (n int, err error) {
c.SetWriteDeadline(time.Now().Add(c.timeout))
n, err = c.conn.Write(b)
c.SetReadDeadline(time.Now().Add(c.longTimeout))
return n, err
}
func (c *timeoutConn) Close() error {
return c.conn.Close()
}
func (c *timeoutConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *timeoutConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
func (c *timeoutConn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
func (c *timeoutConn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// urlMaker builds URL and resource
const (
urlTypeCname = 1
urlTypeIP = 2
urlTypeAliyun = 3
)
type urlMaker struct {
Scheme string // HTTP or HTTPS
NetLoc string // Host or IP
Type int // 1 CNAME, 2 IP, 3 ALIYUN
IsProxy bool // Proxy
}
// Init parses endpoint
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error {
if strings.HasPrefix(endpoint, "http://") {
um.Scheme = "http"
um.NetLoc = endpoint[len("http://"):]
} else if strings.HasPrefix(endpoint, "https://") {
um.Scheme = "https"
um.NetLoc = endpoint[len("https://"):]
} else {
um.Scheme = "http"
um.NetLoc = endpoint
}
// use url.Parse() to get the real host
strUrl := um.Scheme + "://" + um.NetLoc
url, err := url.Parse(strUrl)
if err != nil {
return err
}
um.NetLoc = url.Host
host, _, err := net.SplitHostPort(um.NetLoc)
if err != nil {
host = um.NetLoc
if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
}
ip := net.ParseIP(host)
if ip != nil {
um.Type = urlTypeIP
} else if isCname {
um.Type = urlTypeCname
} else {
um.Type = urlTypeAliyun
}
um.IsProxy = isProxy
return nil
}
// getURL gets URL
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
host, path := um.buildURL(bucket, object)
addr := ""
if params == "" {
addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
} else {
addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
uri, _ := url.ParseRequestURI(addr)
return uri
}
// getSignURL gets sign URL
func (um urlMaker) getSignURL(bucket, object, params string) string {
host, path := um.buildURL(bucket, object)
return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}
// getSignRtmpURL builds the signed RTMP URL
func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
host, path := um.buildURL(bucket, "live")
channelName = url.QueryEscape(channelName)
channelName = strings.Replace(channelName, "+", "%20", -1)
return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params)
}
// buildURL builds URL
func (um urlMaker) buildURL(bucket, object string) (string, string) {
var host = ""
var path = ""
object = url.QueryEscape(object)
object = strings.Replace(object, "+", "%20", -1)
if um.Type == urlTypeCname {
host = um.NetLoc
path = "/" + object
} else if um.Type == urlTypeIP {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = um.NetLoc
path = fmt.Sprintf("/%s/%s", bucket, object)
}
} else {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = bucket + "." + um.NetLoc
path = "/" + object
}
}
return host, path
}
// buildURLV4 builds URL for V4 signing; unlike buildURL, "/" in the object key stays unescaped
func (um urlMaker) buildURLV4(bucket, object string) (string, string) {
var host = ""
var path = ""
object = url.QueryEscape(object)
object = strings.Replace(object, "+", "%20", -1)
// no escape /
object = strings.Replace(object, "%2F", "/", -1)
if um.Type == urlTypeCname {
host = um.NetLoc
path = "/" + object
} else if um.Type == urlTypeIP {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = um.NetLoc
path = fmt.Sprintf("/%s/%s", bucket, object)
}
} else {
if bucket == "" {
host = um.NetLoc
path = "/"
} else {
host = bucket + "." + um.NetLoc
path = fmt.Sprintf("/%s/%s", bucket, object)
}
}
return host, path
}
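For orientation, a sketch of the host/path shapes the three endpoint types above produce (all values made up):

// Illustrative buildURL results per endpoint type (made-up values):
//   urlTypeAliyun: bucket "b", object "o" -> host "b.oss-cn-hangzhou.aliyuncs.com", path "/o"
//   urlTypeIP:     bucket "b", object "o" -> host "10.0.0.1", path "/b/o"
//   urlTypeCname:  bucket "b", object "o" -> host "img.example.com", path "/o"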

@@ -1,265 +0,0 @@
package oss
import "os"
// ACLType bucket/object ACL
type ACLType string
const (
// ACLPrivate definition : private read and write
ACLPrivate ACLType = "private"
// ACLPublicRead definition : public read and private write
ACLPublicRead ACLType = "public-read"
// ACLPublicReadWrite definition : public read and public write
ACLPublicReadWrite ACLType = "public-read-write"
// ACLDefault Object. It's only applicable for object.
ACLDefault ACLType = "default"
)
// VersioningStatus bucket versioning status
type VersioningStatus string
const (
// Versioning Status definition: Enabled
VersionEnabled VersioningStatus = "Enabled"
// Versioning Status definition: Suspended
VersionSuspended VersioningStatus = "Suspended"
)
// MetadataDirectiveType specifies whether to use the source object's metadata when copying an object.
type MetadataDirectiveType string
const (
// MetaCopy the target object's metadata is copied from the source one
MetaCopy MetadataDirectiveType = "COPY"
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
MetaReplace MetadataDirectiveType = "REPLACE"
)
// TaggingDirectiveType specifies whether to use the source object's tagging when copying an object.
type TaggingDirectiveType string
const (
// TaggingCopy the target object's tagging is copied from the source one
TaggingCopy TaggingDirectiveType = "COPY"
// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
TaggingReplace TaggingDirectiveType = "REPLACE"
)
// AlgorithmType specifies the server-side encryption algorithm name
type AlgorithmType string
const (
KMSAlgorithm AlgorithmType = "KMS"
AESAlgorithm AlgorithmType = "AES256"
SM4Algorithm AlgorithmType = "SM4"
)
// StorageClassType bucket storage type
type StorageClassType string
const (
// StorageStandard standard
StorageStandard StorageClassType = "Standard"
// StorageIA infrequent access
StorageIA StorageClassType = "IA"
// StorageArchive archive
StorageArchive StorageClassType = "Archive"
// StorageColdArchive cold archive
StorageColdArchive StorageClassType = "ColdArchive"
)
// DataRedundancyType bucket data redundancy type
type DataRedundancyType string
const (
// RedundancyLRS Local redundancy, default value
RedundancyLRS DataRedundancyType = "LRS"
// RedundancyZRS Same city redundancy
RedundancyZRS DataRedundancyType = "ZRS"
)
// ObjecthashFuncType the hash function type used for object hash calculation
type ObjecthashFuncType string
const (
HashFuncSha1 ObjecthashFuncType = "SHA-1"
HashFuncSha256 ObjecthashFuncType = "SHA-256"
)
// PayerType the type of request payer
type PayerType string
const (
// Requester the requester pays for the request
Requester PayerType = "Requester"
// BucketOwner the bucket owner pays for the request
BucketOwner PayerType = "BucketOwner"
)
// RestoreMode the restore mode for a ColdArchive object
type RestoreMode string
const (
// RestoreExpedited object will be restored in 1 hour
RestoreExpedited RestoreMode = "Expedited"
// RestoreStandard object will be restored in 2-5 hours
RestoreStandard RestoreMode = "Standard"
// RestoreBulk object will be restored in 5-10 hours
RestoreBulk RestoreMode = "Bulk"
)
// HTTPMethod HTTP request method
type HTTPMethod string
const (
// HTTPGet HTTP GET
HTTPGet HTTPMethod = "GET"
// HTTPPut HTTP PUT
HTTPPut HTTPMethod = "PUT"
// HTTPHead HTTP HEAD
HTTPHead HTTPMethod = "HEAD"
// HTTPPost HTTP POST
HTTPPost HTTPMethod = "POST"
// HTTPDelete HTTP DELETE
HTTPDelete HTTPMethod = "DELETE"
)
// HTTP headers
const (
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
HTTPHeaderAuthorization = "Authorization"
HTTPHeaderCacheControl = "Cache-Control"
HTTPHeaderContentDisposition = "Content-Disposition"
HTTPHeaderContentEncoding = "Content-Encoding"
HTTPHeaderContentLength = "Content-Length"
HTTPHeaderContentMD5 = "Content-MD5"
HTTPHeaderContentType = "Content-Type"
HTTPHeaderContentLanguage = "Content-Language"
HTTPHeaderDate = "Date"
HTTPHeaderEtag = "ETag"
HTTPHeaderExpires = "Expires"
HTTPHeaderHost = "Host"
HTTPHeaderLastModified = "Last-Modified"
HTTPHeaderRange = "Range"
HTTPHeaderLocation = "Location"
HTTPHeaderOrigin = "Origin"
HTTPHeaderServer = "Server"
HTTPHeaderUserAgent = "User-Agent"
HTTPHeaderIfModifiedSince = "If-Modified-Since"
HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since"
HTTPHeaderIfMatch = "If-Match"
HTTPHeaderIfNoneMatch = "If-None-Match"
HTTPHeaderACReqMethod = "Access-Control-Request-Method"
HTTPHeaderACReqHeaders = "Access-Control-Request-Headers"
HTTPHeaderOssACL = "X-Oss-Acl"
HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption"
HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key"
HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match"
HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since"
HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive"
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
HTTPHeaderOssCallback = "X-Oss-Callback"
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
HTTPHeaderOssRequester = "X-Oss-Request-Payer"
HTTPHeaderOssTagging = "X-Oss-Tagging"
HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive"
HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit"
HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite"
HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior"
HTTPHeaderOssTaskID = "X-Oss-Task-Id"
HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx"
HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx"
HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap"
HttpHeaderOssDate = "X-Oss-Date"
HttpHeaderOssContentSha256 = "X-Oss-Content-Sha256"
HttpHeaderOssNotification = "X-Oss-Notification"
)
// HTTP Param
const (
HTTPParamExpires = "Expires"
HTTPParamAccessKeyID = "OSSAccessKeyId"
HTTPParamSignature = "Signature"
HTTPParamSecurityToken = "security-token"
HTTPParamPlaylistName = "playlistName"
HTTPParamSignatureVersion = "x-oss-signature-version"
HTTPParamExpiresV2 = "x-oss-expires"
HTTPParamAccessKeyIDV2 = "x-oss-access-key-id"
HTTPParamSignatureV2 = "x-oss-signature"
HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
)
// Other constants
const (
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
MinPartSize = 100 * 1024 // Min part size, 100KB
FilePermMode = os.FileMode(0664) // Default file permission
TempFilePrefix = "oss-go-temp-" // Temp file prefix
TempFileSuffix = ".temp" // Temp file suffix
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
NullVersion = "null"
DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature
Version = "v2.2.5" // Go SDK version
)
// FrameType
const (
DataFrameType = 8388609
ContinuousFrameType = 8388612
EndFrameType = 8388613
MetaEndFrameCSVType = 8388614
MetaEndFrameJSONType = 8388615
)
// AuthVersion the version of auth
type AuthVersionType string
const (
// AuthV1 v1
AuthV1 AuthVersionType = "v1"
// AuthV2 v2
AuthV2 AuthVersionType = "v2"
// AuthV4 v4
AuthV4 AuthVersionType = "v4"
)

@@ -1,123 +0,0 @@
package oss
import (
"hash"
"hash/crc64"
)
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint64
tab *crc64.Table
}
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int { return crc64.Size }
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
// Reset resets the hash to its initial state.
func (d *digest) Reset() { d.crc = 0 }
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = crc64.Update(d.crc, d.tab, p)
return len(p), nil
}
// Sum64 returns CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }
// Sum returns hash value.
func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
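Streaming use of this digest mirrors how the upload path checksums request bodies; a short package-internal sketch (assuming extra fmt and crc64 identifiers beyond this file's imports):

func exampleCRCStreaming() {
	tab := crc64.MakeTable(crc64.ECMA)
	d := NewCRC(tab, 0)
	d.Write([]byte("hello "))
	d.Write([]byte("world"))
	// Incremental updates match the one-shot checksum of the whole input.
	fmt.Println(d.Sum64() == crc64.Checksum([]byte("hello world"), tab)) // true
}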
// gf2Dim dimension of GF(2) vectors (length of CRC)
const gf2Dim int = 64
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
var sum uint64
for i := 0; vec != 0; i++ {
if vec&1 != 0 {
sum ^= mat[i]
}
vec >>= 1
}
return sum
}
func gf2MatrixSquare(square []uint64, mat []uint64) {
for n := 0; n < gf2Dim; n++ {
square[n] = gf2MatrixTimes(mat, mat[n])
}
}
// CRC64Combine combines CRC64
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
// Degenerate case
if len2 == 0 {
return crc1
}
// Put operator for one zero bit in odd
odd[0] = crc64.ECMA // CRC64 polynomial
var row uint64 = 1
for n := 1; n < gf2Dim; n++ {
odd[n] = row
row <<= 1
}
// Put operator for two zero bits in even
gf2MatrixSquare(even[:], odd[:])
// Put operator for four zero bits in odd
gf2MatrixSquare(odd[:], even[:])
// Apply len2 zeros to crc1; the first square puts the operator for one zero byte (eight zero bits) in even
for {
// Apply zeros operator for this bit of len2
gf2MatrixSquare(even[:], odd[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(even[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
// Another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd[:], even[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(odd[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
}
// Return combined CRC
crc1 ^= crc2
return crc1
}
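// Illustrative sketch (not part of the original file): CRC64Combine lets the
// whole-object CRC be derived from per-part CRCs computed independently, which
// is how the concurrent download verifies integrity without re-reading the file:
//
//	tab := crc64.MakeTable(crc64.ECMA)
//	partA, partB := []byte("foo"), []byte("bar")
//	crcA := crc64.Checksum(partA, tab)
//	crcB := crc64.Checksum(partB, tab)
//	whole := CRC64Combine(crcA, crcB, uint64(len(partB)))
//	// whole equals crc64.Checksum([]byte("foobar"), tab)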

@ -1,567 +0,0 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// DownloadFile downloads files with multipart download.
//
// objectKey the object key.
// filePath the local file to download from objectKey in OSS.
// partSize the part size in bytes.
// options object's constraints, check out GetObject for the reference.
//
// error it's nil when the call succeeds, otherwise it's an error object.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
if partSize < 1 {
return errors.New("oss: part size smaller than 1")
}
uRange, err := GetRangeConfig(options)
if err != nil {
return err
}
cpConf := getCpConfig(options)
routines := getRoutines(options)
var strVersionId string
versionId, _ := FindOption(options, "versionId", nil)
if versionId != nil {
strVersionId = versionId.(string)
}
if cpConf != nil && cpConf.IsEnable {
cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
if cpFilePath != "" {
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
}
}
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
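// Illustrative sketch (not part of the original file): a typical multipart
// download with a 1MB part size, three concurrent routines and checkpointing,
// assuming this package's Routines and CheckpointDir options; the object key
// and paths are hypothetical:
//
//	err := bucket.DownloadFile("my-object", "/tmp/my-object", 1024*1024,
//		Routines(3), CheckpointDir(true, "/tmp/cp"))
//	if err != nil {
//		// handle the error
//	}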
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
absPath, _ := filepath.Abs(destFile)
cpFileName := getCpFileName(src, absPath, versionId)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
// downloadWorkerArg is download worker's parameters
type downloadWorkerArg struct {
bucket *Bucket
key string
filePath string
options []Option
hook downloadPartHook
enableCRC bool
}
// downloadPartHook is hook for test
type downloadPartHook func(part downloadPart) error
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
func defaultDownloadPartHook(part downloadPart) error {
return nil
}
// defaultDownloadProgressListener defines the default ProgressListener; it shields the ProgressListener in the options of GetObject.
type defaultDownloadProgressListener struct {
}
// ProgressChanged no-ops
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// downloadWorker
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
for part := range jobs {
if err := arg.hook(part); err != nil {
failed <- err
break
}
// Resolve options
r := Range(part.Start, part.End)
p := Progress(&defaultDownloadProgressListener{})
var respHeader http.Header
opts := make([]Option, 0, len(arg.options)+3)
// Append in order; the sequence must not be reversed!
opts = append(opts, arg.options...)
opts = append(opts, r, p, GetResponseHeader(&respHeader))
rd, err := arg.bucket.GetObject(arg.key, opts...)
if err != nil {
failed <- err
break
}
defer rd.Close()
var crcCalc hash.Hash64
if arg.enableCRC {
crcCalc = crc64.New(CrcTable())
contentLen := part.End - part.Start + 1
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
}
select {
case <-die:
return
default:
}
fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
if err != nil {
failed <- err
break
}
_, err = fd.Seek(part.Start-part.Offset, io.SeekStart)
if err != nil {
fd.Close()
failed <- err
break
}
startT := time.Now().Unix()
_, err = io.Copy(fd, rd)
endT := time.Now().Unix()
if err != nil {
arg.bucket.Client.Config.WriteLog(Debug, "download part error, cost:%d seconds, part number:%d, request id:%s, error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
fd.Close()
failed <- err
break
}
if arg.enableCRC {
part.CRC64 = crcCalc.Sum64()
}
fd.Close()
results <- part
}
}
// downloadScheduler
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
for _, part := range parts {
jobs <- part
}
close(jobs)
}
// downloadPart defines download part
type downloadPart struct {
Index int // Part number, starting from 0
Start int64 // Start index
End int64 // End index
Offset int64 // Offset
CRC64 uint64 // CRC check value of part
}
// getDownloadParts gets download parts
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
parts := []downloadPart{}
part := downloadPart{}
i := 0
start, end := AdjustRange(uRange, objectSize)
for offset := start; offset < end; offset += partSize {
part.Index = i
part.Start = offset
part.End = GetPartEnd(offset, end, partSize)
part.Offset = start
part.CRC64 = 0
parts = append(parts, part)
i++
}
return parts
}
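// Illustrative sketch (not part of the original file): for a 25-byte object,
// a 10-byte part size and no range, getDownloadParts yields three parts with
// inclusive indexes [0,9], [10,19] and [20,24], each with Offset 0.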
// getObjectBytes gets object bytes length
func getObjectBytes(parts []downloadPart) int64 {
var ob int64
for _, part := range parts {
ob += (part.End - part.Start + 1)
}
return ob
}
// combineCRCInParts calculates the total CRC of continuous parts
func combineCRCInParts(dps []downloadPart) uint64 {
if len(dps) == 0 {
return 0
}
crc := dps[0].CRC64
for i := 1; i < len(dps); i++ {
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
}
return crc
}
// downloadFile downloads file concurrently without checkpoint.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := GetProgressListener(options)
// If the file does not exist, create one. If it exists, the download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Get the object's detailed meta for the whole object size;
// the Range header must be deleted to get the whole object size
skipOptions := DeleteOption(options, HTTPHeaderRange)
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
enableCRC := false
expectedCRC := (uint64)(0)
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
enableCRC = true
expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
}
}
// Get the parts of the file
parts := getDownloadParts(objectSize, partSize, uRange)
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getObjectBytes(parts)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the download workers
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Wait for the part downloads to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
downBytes := (part.End - part.Start + 1)
completedBytes += downBytes
parts[part.Index].CRC64 = part.CRC64
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
if enableCRC {
actualCRC := combineCRCInParts(parts)
err = CheckDownloadCRC(actualCRC, expectedCRC)
if err != nil {
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// ----- Concurrent download with checkpoint -----
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
type downloadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint content MD5
FilePath string // Local file
Object string // Key
ObjStat objectStat // Object status
Parts []downloadPart // All download parts
PartStat []bool // Parts' download status
Start int64 // Start point of the file
End int64 // End point of the file
enableCRC bool // Whether the CRC check is enabled
CRC uint64 // CRC check value
}
type objectStat struct {
Size int64 // Object size
LastModified string // Last modified time
Etag string // Etag
}
// isValid checks whether the checkpoint data is valid. It returns true when the checkpoint itself is intact and the object has not been updated.
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
// Compare the CP's Magic and the MD5
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
return false, nil
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return false, err
}
// Compare the object size, last modified time and etag
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
// Check the download range
if uRange != nil {
start, end := AdjustRange(uRange, objectSize)
if start != cp.Start || end != cp.End {
return false, nil
}
}
return true, nil
}
// load checkpoint from local file
func (cp *downloadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// dump serializes the checkpoint and writes it to a file
func (cp *downloadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialize
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts gets unfinished parts
func (cp downloadCheckpoint) todoParts() []downloadPart {
dps := []downloadPart{}
for i, ps := range cp.PartStat {
if !ps {
dps = append(dps, cp.Parts[i])
}
}
return dps
}
// getCompletedBytes gets completed size
func (cp downloadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
if cp.PartStat[i] {
completedBytes += (part.End - part.Start + 1)
}
}
return completedBytes
}
// prepare initiates download tasks
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
// CP
cp.Magic = downloadCpMagic
cp.FilePath = filePath
cp.Object = objectKey
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
cp.ObjStat.Size = objectSize
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
cp.enableCRC = true
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
}
}
// Parts
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
}
return nil
}
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
err := os.Rename(downFilepath, cp.FilePath)
if err != nil {
return err
}
return os.Remove(cpFilePath)
}
// downloadFileWithCp downloads files with checkpoint.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := GetProgressListener(options)
// Load checkpoint data.
dcp := downloadCheckpoint{}
err := dcp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// Get the object's detailed meta for the whole object size;
// the Range header must be deleted to get the whole object size
skipOptions := DeleteOption(options, HTTPHeaderRange)
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
// Load error or data invalid. Re-initialize the download.
valid, err := dcp.isValid(meta, uRange)
if err != nil || !valid {
if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Create the file if it does not exist; otherwise the part downloads will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Unfinished parts
parts := dcp.todoParts()
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
completedBytes := dcp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
// Start the download worker goroutines
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Wait for the part downloads to finish
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
dcp.PartStat[part.Index] = true
dcp.Parts[part.Index].CRC64 = part.CRC64
dcp.dump(cpFilePath)
downBytes := (part.End - part.Start + 1)
completedBytes += downBytes
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
if dcp.enableCRC {
actualCRC := combineCRCInParts(dcp.Parts)
err = CheckDownloadCRC(actualCRC, dcp.CRC)
if err != nil {
return err
}
}
return dcp.complete(cpFilePath, tempFilePath)
}

@ -1,94 +0,0 @@
package oss
import (
"encoding/xml"
"fmt"
"net/http"
"strings"
)
// ServiceError contains fields of the error response from the OSS Service REST API.
type ServiceError struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"` // The error code returned from OSS to the caller
Message string `xml:"Message"` // The detail error message from OSS
RequestID string `xml:"RequestId"` // The UUID used to uniquely identify the request
HostID string `xml:"HostId"` // The OSS server cluster's Id
Endpoint string `xml:"Endpoint"`
RawMessage string // The raw messages from OSS
StatusCode int // HTTP status code
}
// Error implements interface error
func (e ServiceError) Error() string {
if e.Endpoint == "" {
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
e.StatusCode, e.Code, e.Message, e.RequestID)
}
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int // The expected HTTP status codes returned from OSS
got int // The actual HTTP status code from OSS
}
// Error implements interface error
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
got, strings.Join(expected, " or "))
}
// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
func CheckRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
}
}
return UnexpectedStatusCodeError{allowed, respCode}
}
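// Illustrative sketch (not part of the original file): validating a response
// against the status codes a DELETE is allowed to return; resp is hypothetical:
//
//	if err := CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}); err != nil {
//		// err is an UnexpectedStatusCodeError describing what was expected
//	}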
// CRCCheckError is returned when the CRC check is inconsistent between the client and the server
type CRCCheckError struct {
clientCRC uint64 // Calculated CRC64 in client
serverCRC uint64 // Calculated CRC64 in server
operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
requestID string // The request id of this operation
}
// Error implements interface error
func (e CRCCheckError) Error() string {
return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
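// CheckDownloadCRC checks whether the client-side CRC of a downloaded file matches the server-side CRC.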
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
if clientCRC == serverCRC {
return nil
}
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
}
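// CheckCRC checks whether the client-side CRC of the response matches the server-side CRC for the given operation; it passes when the server returned no CRC header.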
func CheckCRC(resp *Response, operation string) error {
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
return nil
}
return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
}

@ -1,29 +0,0 @@
//go:build !go1.7
// +build !go1.7
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
// this file is only for build,not supports limit upload speed
package oss
import (
"fmt"
"io"
)
const (
perTokenBandwidthSize int = 1024
)
type OssLimiter struct {
}
type LimitSpeedReader struct {
io.ReadCloser
reader io.Reader
ossLimiter *OssLimiter
}
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
return nil, err
}

@ -1,91 +0,0 @@
//go:build go1.7
// +build go1.7
package oss
import (
"fmt"
"io"
"math"
"time"
"golang.org/x/time/rate"
)
const (
perTokenBandwidthSize int = 1024
)
// OssLimiter wraps rate.Limiter
type OssLimiter struct {
limiter *rate.Limiter
}
// GetOssLimiter creates an OssLimiter
// uploadSpeed is in KB/s
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
// First consume the initial full token bucket so the limiter behaves more accurately
limiter.AllowN(time.Now(), uploadSpeed)
return &OssLimiter{
limiter: limiter,
}, nil
}
// LimitSpeedReader limits the bandwidth of uploads
type LimitSpeedReader struct {
io.ReadCloser
reader io.Reader
ossLimiter *OssLimiter
}
// Read reads up to len(p) bytes, pacing the reads through the rate limiter
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
n = 0
err = nil
start := 0
burst := r.ossLimiter.limiter.Burst()
var end int
var tmpN int
var tc int
for start < len(p) {
if start+burst*perTokenBandwidthSize < len(p) {
end = start + burst*perTokenBandwidthSize
} else {
end = len(p)
}
tmpN, err = r.reader.Read(p[start:end])
if tmpN > 0 {
n += tmpN
start = n
}
if err != nil {
return
}
tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
now := time.Now()
re := r.ossLimiter.limiter.ReserveN(now, tc)
if !re.OK() {
err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
start, end, burst, perTokenBandwidthSize)
return
}
timeDelay := re.Delay()
time.Sleep(timeDelay)
}
return
}
// Close closes the underlying reader when it implements io.ReadCloser
func (r *LimitSpeedReader) Close() error {
rc, ok := r.reader.(io.ReadCloser)
if ok {
return rc.Close()
}
return nil
}
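// Illustrative sketch (not part of the original file): throttling an upload
// body to roughly 256 KB/s; the file reader is hypothetical:
//
//	limiter, err := GetOssLimiter(256) // 256 KB/s
//	if err == nil {
//		body := &LimitSpeedReader{reader: file, ossLimiter: limiter}
//		// pass body as the io.Reader of an upload request
//	}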

@ -1,257 +0,0 @@
package oss
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
//
// CreateLiveChannel creates a live-channel
//
// channelName the name of the channel
// config configuration of the channel
//
// CreateLiveChannelResult the result of creating the live-channel
// error nil if success, otherwise error
//
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
var out CreateLiveChannelResult
bs, err := xml.Marshal(config)
if err != nil {
return out, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
params := map[string]interface{}{}
params["live"] = nil
resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
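// Illustrative sketch (not part of the original file): creating a channel that
// produces an HLS playlist; the field names assume this package's
// LiveChannelTarget type and the values are hypothetical:
//
//	config := LiveChannelConfiguration{
//		Description: "test channel",
//		Status:      "enabled",
//		Target:      LiveChannelTarget{Type: "HLS", PlaylistName: "playlist.m3u8"},
//	}
//	result, err := bucket.CreateLiveChannel("my-channel", config)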
//
// PutLiveChannelStatus sets the status of the live-channel: enabled/disabled
//
// channelName the name of the channel
// status enabled/disabled
//
// error nil if success, otherwise error
//
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
params := map[string]interface{}{}
params["live"] = nil
params["status"] = status
resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// PostVodPlaylist creates a playlist based on the specified playlist name, startTime and endTime
//
// channelName the name of the channel
// playlistName the name of the playlist, must end with ".m3u8"
// startTime the start time of the playlist
// endTime the end time of the playlist
//
// error nil if success, otherwise error
//
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
params := map[string]interface{}{}
params["vod"] = nil
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
key := fmt.Sprintf("%s/%s", channelName, playlistName)
resp, err := bucket.do("POST", key, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetVodPlaylist gets the playlist based on the specified channelName, startTime and endTime
//
// channelName the name of the channel
// startTime the start time of the playlist
// endTime the end time of the playlist
//
// io.ReadCloser reader instance for reading data from the response. Close() must be called after use, and it is only valid when error is nil.
// error nil if success, otherwise error
//
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
params := map[string]interface{}{}
params["vod"] = nil
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
if err != nil {
return nil, err
}
return resp.Body, nil
}
//
// GetLiveChannelStat gets the state of the live-channel
//
// channelName the name of the channel
//
// LiveChannelStat the state of the live-channel
// error nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
var out LiveChannelStat
params := map[string]interface{}{}
params["live"] = nil
params["comp"] = "stat"
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
//
// GetLiveChannelInfo gets the configuration info of the live-channel
//
// channelName the name of the channel
//
// LiveChannelConfiguration the configuration info of the live-channel
// error nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
var out LiveChannelConfiguration
params := map[string]interface{}{}
params["live"] = nil
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
//
// GetLiveChannelHistory gets the push records of the live-channel
//
// channelName the name of the channel
//
// LiveChannelHistory push records
// error nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
var out LiveChannelHistory
params := map[string]interface{}{}
params["live"] = nil
params["comp"] = "history"
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
//
// ListLiveChannel lists the live-channels
//
// options Prefix: filter by names starting with the value of "Prefix"
// MaxKeys: the maximum count returned
// Marker: the cursor from which to start listing
//
// ListLiveChannelResult live-channel list
// error nil if success, otherwise error
//
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
var out ListLiveChannelResult
params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["live"] = nil
resp, err := bucket.do("GET", "", params, nil, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
//
// DeleteLiveChannel deletes the live-channel. The operation fails while a client is streaming to the channel. Only the live-channel itself is deleted; the objects generated by the channel are not.
//
// channelName the name of the channel
//
// error nil if success, otherwise error
//
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
params := map[string]interface{}{}
params["live"] = nil
if channelName == "" {
return fmt.Errorf("invalid argument: channel name is empty")
}
resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
//
// SignRtmpURL generates a signed RTMP push-stream URL for a trusted user to push an RTMP stream to the live-channel.
//
// channelName the name of the channel
// playlistName the name of the playlist, must end with ".m3u8"
// expires expiration (in seconds)
//
// string signed RTMP push-stream URL
// error nil if success, otherwise error
//
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
if expires <= 0 {
return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires)
}
expiration := time.Now().Unix() + expires
return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
}
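// Illustrative sketch (not part of the original file): generating a push URL
// valid for one hour; the channel and playlist names are hypothetical:
//
//	url, err := bucket.SignRtmpURL("my-channel", "playlist.m3u8", 3600)
//	if err == nil {
//		// hand url to the trusted publisher
//	}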

@ -1,594 +0,0 @@
package oss
import (
"mime"
"path"
"strings"
)
var extToMimeType = map[string]string{
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
".apk": "application/vnd.android.package-archive",
".hqx": "application/mac-binhex40",
".cpt": "application/mac-compactpro",
".doc": "application/msword",
".ogg": "application/ogg",
".pdf": "application/pdf",
".rtf": "text/rtf",
".mif": "application/vnd.mif",
".xls": "application/vnd.ms-excel",
".ppt": "application/vnd.ms-powerpoint",
".odc": "application/vnd.oasis.opendocument.chart",
".odb": "application/vnd.oasis.opendocument.database",
".odf": "application/vnd.oasis.opendocument.formula",
".odg": "application/vnd.oasis.opendocument.graphics",
".otg": "application/vnd.oasis.opendocument.graphics-template",
".odi": "application/vnd.oasis.opendocument.image",
".odp": "application/vnd.oasis.opendocument.presentation",
".otp": "application/vnd.oasis.opendocument.presentation-template",
".ods": "application/vnd.oasis.opendocument.spreadsheet",
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
".odt": "application/vnd.oasis.opendocument.text",
".odm": "application/vnd.oasis.opendocument.text-master",
".ott": "application/vnd.oasis.opendocument.text-template",
".oth": "application/vnd.oasis.opendocument.text-web",
".sxw": "application/vnd.sun.xml.writer",
".stw": "application/vnd.sun.xml.writer.template",
".sxc": "application/vnd.sun.xml.calc",
".stc": "application/vnd.sun.xml.calc.template",
".sxd": "application/vnd.sun.xml.draw",
".std": "application/vnd.sun.xml.draw.template",
".sxi": "application/vnd.sun.xml.impress",
".sti": "application/vnd.sun.xml.impress.template",
".sxg": "application/vnd.sun.xml.writer.global",
".sxm": "application/vnd.sun.xml.math",
".sis": "application/vnd.symbian.install",
".wbxml": "application/vnd.wap.wbxml",
".wmlc": "application/vnd.wap.wmlc",
".wmlsc": "application/vnd.wap.wmlscriptc",
".bcpio": "application/x-bcpio",
".torrent": "application/x-bittorrent",
".bz2": "application/x-bzip2",
".vcd": "application/x-cdlink",
".pgn": "application/x-chess-pgn",
".cpio": "application/x-cpio",
".csh": "application/x-csh",
".dvi": "application/x-dvi",
".spl": "application/x-futuresplash",
".gtar": "application/x-gtar",
".hdf": "application/x-hdf",
".jar": "application/x-java-archive",
".jnlp": "application/x-java-jnlp-file",
".js": "application/x-javascript",
".ksp": "application/x-kspread",
".chrt": "application/x-kchart",
".kil": "application/x-killustrator",
".latex": "application/x-latex",
".rpm": "application/x-rpm",
".sh": "application/x-sh",
".shar": "application/x-shar",
".swf": "application/x-shockwave-flash",
".sit": "application/x-stuffit",
".sv4cpio": "application/x-sv4cpio",
".sv4crc": "application/x-sv4crc",
".tar": "application/x-tar",
".tcl": "application/x-tcl",
".tex": "application/x-tex",
".man": "application/x-troff-man",
".me": "application/x-troff-me",
".ms": "application/x-troff-ms",
".ustar": "application/x-ustar",
".src": "application/x-wais-source",
".zip": "application/zip",
".m3u": "audio/x-mpegurl",
".ra": "audio/x-pn-realaudio",
".wav": "audio/x-wav",
".wma": "audio/x-ms-wma",
".wax": "audio/x-ms-wax",
".pdb": "chemical/x-pdb",
".xyz": "chemical/x-xyz",
".bmp": "image/bmp",
".gif": "image/gif",
".ief": "image/ief",
".png": "image/png",
".wbmp": "image/vnd.wap.wbmp",
".ras": "image/x-cmu-raster",
".pnm": "image/x-portable-anymap",
".pbm": "image/x-portable-bitmap",
".pgm": "image/x-portable-graymap",
".ppm": "image/x-portable-pixmap",
".rgb": "image/x-rgb",
".xbm": "image/x-xbitmap",
".xpm": "image/x-xpixmap",
".xwd": "image/x-xwindowdump",
".css": "text/css",
".rtx": "text/richtext",
".tsv": "text/tab-separated-values",
".jad": "text/vnd.sun.j2me.app-descriptor",
".wml": "text/vnd.wap.wml",
".wmls": "text/vnd.wap.wmlscript",
".etx": "text/x-setext",
".mxu": "video/vnd.mpegurl",
".flv": "video/x-flv",
".wm": "video/x-ms-wm",
".wmv": "video/x-ms-wmv",
".wmx": "video/x-ms-wmx",
".wvx": "video/x-ms-wvx",
".avi": "video/x-msvideo",
".movie": "video/x-sgi-movie",
".ice": "x-conference/x-cooltalk",
".3gp": "video/3gpp",
".ai": "application/postscript",
".aif": "audio/x-aiff",
".aifc": "audio/x-aiff",
".aiff": "audio/x-aiff",
".asc": "text/plain",
".atom": "application/atom+xml",
".au": "audio/basic",
".bin": "application/octet-stream",
".cdf": "application/x-netcdf",
".cgm": "image/cgm",
".class": "application/octet-stream",
".dcr": "application/x-director",
".dif": "video/x-dv",
".dir": "application/x-director",
".djv": "image/vnd.djvu",
".djvu": "image/vnd.djvu",
".dll": "application/octet-stream",
".dmg": "application/octet-stream",
".dms": "application/octet-stream",
".dtd": "application/xml-dtd",
".dv": "video/x-dv",
".dxr": "application/x-director",
".eps": "application/postscript",
".exe": "application/octet-stream",
".ez": "application/andrew-inset",
".gram": "application/srgs",
".grxml": "application/srgs+xml",
".gz": "application/x-gzip",
".htm": "text/html",
".html": "text/html",
".ico": "image/x-icon",
".ics": "text/calendar",
".ifb": "text/calendar",
".iges": "model/iges",
".igs": "model/iges",
".jp2": "image/jp2",
".jpe": "image/jpeg",
".jpeg": "image/jpeg",
".jpg": "image/jpeg",
".kar": "audio/midi",
".lha": "application/octet-stream",
".lzh": "application/octet-stream",
".m4a": "audio/mp4a-latm",
".m4p": "audio/mp4a-latm",
".m4u": "video/vnd.mpegurl",
".m4v": "video/x-m4v",
".mac": "image/x-macpaint",
".mathml": "application/mathml+xml",
".mesh": "model/mesh",
".mid": "audio/midi",
".midi": "audio/midi",
".mov": "video/quicktime",
".mp2": "audio/mpeg",
".mp3": "audio/mpeg",
".mp4": "video/mp4",
".mpe": "video/mpeg",
".mpeg": "video/mpeg",
".mpg": "video/mpeg",
".mpga": "audio/mpeg",
".msh": "model/mesh",
".nc": "application/x-netcdf",
".oda": "application/oda",
".ogv": "video/ogv",
".pct": "image/pict",
".pic": "image/pict",
".pict": "image/pict",
".pnt": "image/x-macpaint",
".pntg": "image/x-macpaint",
".ps": "application/postscript",
".qt": "video/quicktime",
".qti": "image/x-quicktime",
".qtif": "image/x-quicktime",
".ram": "audio/x-pn-realaudio",
".rdf": "application/rdf+xml",
".rm": "application/vnd.rn-realmedia",
".roff": "application/x-troff",
".sgm": "text/sgml",
".sgml": "text/sgml",
".silo": "model/mesh",
".skd": "application/x-koan",
".skm": "application/x-koan",
".skp": "application/x-koan",
".skt": "application/x-koan",
".smi": "application/smil",
".smil": "application/smil",
".snd": "audio/basic",
".so": "application/octet-stream",
".svg": "image/svg+xml",
".t": "application/x-troff",
".texi": "application/x-texinfo",
".texinfo": "application/x-texinfo",
".tif": "image/tiff",
".tiff": "image/tiff",
".tr": "application/x-troff",
".txt": "text/plain",
".vrml": "model/vrml",
".vxml": "application/voicexml+xml",
".webm": "video/webm",
".wrl": "model/vrml",
".xht": "application/xhtml+xml",
".xhtml": "application/xhtml+xml",
".xml": "application/xml",
".xsl": "application/xml",
".xslt": "application/xslt+xml",
".xul": "application/vnd.mozilla.xul+xml",
".webp": "image/webp",
".323": "text/h323",
".aab": "application/x-authoware-bin",
".aam": "application/x-authoware-map",
".aas": "application/x-authoware-seg",
".acx": "application/internet-property-stream",
".als": "audio/X-Alpha5",
".amc": "application/x-mpeg",
".ani": "application/octet-stream",
".asd": "application/astound",
".asf": "video/x-ms-asf",
".asn": "application/astound",
".asp": "application/x-asap",
".asr": "video/x-ms-asf",
".asx": "video/x-ms-asf",
".avb": "application/octet-stream",
".awb": "audio/amr-wb",
".axs": "application/olescript",
".bas": "text/plain",
".bin ": "application/octet-stream",
".bld": "application/bld",
".bld2": "application/bld2",
".bpk": "application/octet-stream",
".c": "text/plain",
".cal": "image/x-cals",
".cat": "application/vnd.ms-pkiseccat",
".ccn": "application/x-cnc",
".cco": "application/x-cocoa",
".cer": "application/x-x509-ca-cert",
".cgi": "magnus-internal/cgi",
".chat": "application/x-chat",
".clp": "application/x-msclip",
".cmx": "image/x-cmx",
".co": "application/x-cult3d-object",
".cod": "image/cis-cod",
".conf": "text/plain",
".cpp": "text/plain",
".crd": "application/x-mscardfile",
".crl": "application/pkix-crl",
".crt": "application/x-x509-ca-cert",
".csm": "chemical/x-csml",
".csml": "chemical/x-csml",
".cur": "application/octet-stream",
".dcm": "x-lml/x-evm",
".dcx": "image/x-dcx",
".der": "application/x-x509-ca-cert",
".dhtml": "text/html",
".dot": "application/msword",
".dwf": "drawing/x-dwf",
".dwg": "application/x-autocad",
".dxf": "application/x-autocad",
".ebk": "application/x-expandedbook",
".emb": "chemical/x-embl-dl-nucleotide",
".embl": "chemical/x-embl-dl-nucleotide",
".epub": "application/epub+zip",
".eri": "image/x-eri",
".es": "audio/echospeech",
".esl": "audio/echospeech",
".etc": "application/x-earthtime",
".evm": "x-lml/x-evm",
".evy": "application/envoy",
".fh4": "image/x-freehand",
".fh5": "image/x-freehand",
".fhc": "image/x-freehand",
".fif": "application/fractals",
".flr": "x-world/x-vrml",
".fm": "application/x-maker",
".fpx": "image/x-fpx",
".fvi": "video/isivideo",
".gau": "chemical/x-gaussian-input",
".gca": "application/x-gca-compressed",
".gdb": "x-lml/x-gdb",
".gps": "application/x-gps",
".h": "text/plain",
".hdm": "text/x-hdml",
".hdml": "text/x-hdml",
".hlp": "application/winhlp",
".hta": "application/hta",
".htc": "text/x-component",
".hts": "text/html",
".htt": "text/webviewhtml",
".ifm": "image/gif",
".ifs": "image/ifs",
".iii": "application/x-iphone",
".imy": "audio/melody",
".ins": "application/x-internet-signup",
".ips": "application/x-ipscript",
".ipx": "application/x-ipix",
".isp": "application/x-internet-signup",
".it": "audio/x-mod",
".itz": "audio/x-mod",
".ivr": "i-world/i-vrml",
".j2k": "image/j2k",
".jam": "application/x-jam",
".java": "text/plain",
".jfif": "image/pipeg",
".jpz": "image/jpeg",
".jwc": "application/jwc",
".kjx": "application/x-kjx",
".lak": "x-lml/x-lak",
".lcc": "application/fastman",
".lcl": "application/x-digitalloca",
".lcr": "application/x-digitalloca",
".lgh": "application/lgh",
".lml": "x-lml/x-lml",
".lmlpack": "x-lml/x-lmlpack",
".log": "text/plain",
".lsf": "video/x-la-asf",
".lsx": "video/x-la-asf",
".m13": "application/x-msmediaview",
".m14": "application/x-msmediaview",
".m15": "audio/x-mod",
".m3url": "audio/x-mpegurl",
".m4b": "audio/mp4a-latm",
".ma1": "audio/ma1",
".ma2": "audio/ma2",
".ma3": "audio/ma3",
".ma5": "audio/ma5",
".map": "magnus-internal/imagemap",
".mbd": "application/mbedlet",
".mct": "application/x-mascot",
".mdb": "application/x-msaccess",
".mdz": "audio/x-mod",
".mel": "text/x-vmel",
".mht": "message/rfc822",
".mhtml": "message/rfc822",
".mi": "application/x-mif",
".mil": "image/x-cals",
".mio": "audio/x-mio",
".mmf": "application/x-skt-lbs",
".mng": "video/x-mng",
".mny": "application/x-msmoney",
".moc": "application/x-mocha",
".mocha": "application/x-mocha",
".mod": "audio/x-mod",
".mof": "application/x-yumekara",
".mol": "chemical/x-mdl-molfile",
".mop": "chemical/x-mopac-input",
".mpa": "video/mpeg",
".mpc": "application/vnd.mpohun.certificate",
".mpg4": "video/mp4",
".mpn": "application/vnd.mophun.application",
".mpp": "application/vnd.ms-project",
".mps": "application/x-mapserver",
".mpv2": "video/mpeg",
".mrl": "text/x-mrml",
".mrm": "application/x-mrm",
".msg": "application/vnd.ms-outlook",
".mts": "application/metastream",
".mtx": "application/metastream",
".mtz": "application/metastream",
".mvb": "application/x-msmediaview",
".mzv": "application/metastream",
".nar": "application/zip",
".nbmp": "image/nbmp",
".ndb": "x-lml/x-ndb",
".ndwn": "application/ndwn",
".nif": "application/x-nif",
".nmz": "application/x-scream",
".nokia-op-logo": "image/vnd.nok-oplogo-color",
".npx": "application/x-netfpx",
".nsnd": "audio/nsnd",
".nva": "application/x-neva1",
".nws": "message/rfc822",
".oom": "application/x-AtlasMate-Plugin",
".p10": "application/pkcs10",
".p12": "application/x-pkcs12",
".p7b": "application/x-pkcs7-certificates",
".p7c": "application/x-pkcs7-mime",
".p7m": "application/x-pkcs7-mime",
".p7r": "application/x-pkcs7-certreqresp",
".p7s": "application/x-pkcs7-signature",
".pac": "audio/x-pac",
".pae": "audio/x-epac",
".pan": "application/x-pan",
".pcx": "image/x-pcx",
".pda": "image/x-pda",
".pfr": "application/font-tdpfr",
".pfx": "application/x-pkcs12",
".pko": "application/ynd.ms-pkipko",
".pm": "application/x-perl",
".pma": "application/x-perfmon",
".pmc": "application/x-perfmon",
".pmd": "application/x-pmd",
".pml": "application/x-perfmon",
".pmr": "application/x-perfmon",
".pmw": "application/x-perfmon",
".pnz": "image/png",
".pot,": "application/vnd.ms-powerpoint",
".pps": "application/vnd.ms-powerpoint",
".pqf": "application/x-cprplayer",
".pqi": "application/cprplayer",
".prc": "application/x-prc",
".prf": "application/pics-rules",
".prop": "text/plain",
".proxy": "application/x-ns-proxy-autoconfig",
".ptlk": "application/listenup",
".pub": "application/x-mspublisher",
".pvx": "video/x-pv-pvx",
".qcp": "audio/vnd.qcelp",
".r3t": "text/vnd.rn-realtext3d",
".rar": "application/octet-stream",
".rc": "text/plain",
".rf": "image/vnd.rn-realflash",
".rlf": "application/x-richlink",
".rmf": "audio/x-rmf",
".rmi": "audio/mid",
".rmm": "audio/x-pn-realaudio",
".rmvb": "audio/x-pn-realaudio",
".rnx": "application/vnd.rn-realplayer",
".rp": "image/vnd.rn-realpix",
".rt": "text/vnd.rn-realtext",
".rte": "x-lml/x-gps",
".rtg": "application/metastream",
".rv": "video/vnd.rn-realvideo",
".rwc": "application/x-rogerwilco",
".s3m": "audio/x-mod",
".s3z": "audio/x-mod",
".sca": "application/x-supercard",
".scd": "application/x-msschedule",
".sct": "text/scriptlet",
".sdf": "application/e-score",
".sea": "application/x-stuffit",
".setpay": "application/set-payment-initiation",
".setreg": "application/set-registration-initiation",
".shtml": "text/html",
".shtm": "text/html",
".shw": "application/presentations",
".si6": "image/si6",
".si7": "image/vnd.stiwap.sis",
".si9": "image/vnd.lgtwap.sis",
".slc": "application/x-salsa",
".smd": "audio/x-smd",
".smp": "application/studiom",
".smz": "audio/x-smd",
".spc": "application/x-pkcs7-certificates",
".spr": "application/x-sprite",
".sprite": "application/x-sprite",
".sdp": "application/sdp",
".spt": "application/x-spt",
".sst": "application/vnd.ms-pkicertstore",
".stk": "application/hyperstudio",
".stl": "application/vnd.ms-pkistl",
".stm": "text/html",
".svf": "image/vnd",
".svh": "image/svh",
".svr": "x-world/x-svr",
".swfl": "application/x-shockwave-flash",
".tad": "application/octet-stream",
".talk": "text/x-speech",
".taz": "application/x-tar",
".tbp": "application/x-timbuktu",
".tbt": "application/x-timbuktu",
".tgz": "application/x-compressed",
".thm": "application/vnd.eri.thm",
".tki": "application/x-tkined",
".tkined": "application/x-tkined",
".toc": "application/toc",
".toy": "image/toy",
".trk": "x-lml/x-gps",
".trm": "application/x-msterminal",
".tsi": "audio/tsplayer",
".tsp": "application/dsptype",
".ttf": "application/octet-stream",
".ttz": "application/t-time",
".uls": "text/iuls",
".ult": "audio/x-mod",
".uu": "application/x-uuencode",
".uue": "application/x-uuencode",
".vcf": "text/x-vcard",
".vdo": "video/vdo",
".vib": "audio/vib",
".viv": "video/vivo",
".vivo": "video/vivo",
".vmd": "application/vocaltec-media-desc",
".vmf": "application/vocaltec-media-file",
".vmi": "application/x-dreamcast-vms-info",
".vms": "application/x-dreamcast-vms",
".vox": "audio/voxware",
".vqe": "audio/x-twinvq-plugin",
".vqf": "audio/x-twinvq",
".vql": "audio/x-twinvq",
".vre": "x-world/x-vream",
".vrt": "x-world/x-vrt",
".vrw": "x-world/x-vream",
".vts": "workbook/formulaone",
".wcm": "application/vnd.ms-works",
".wdb": "application/vnd.ms-works",
".web": "application/vnd.xara",
".wi": "image/wavelet",
".wis": "application/x-InstallShield",
".wks": "application/vnd.ms-works",
".wmd": "application/x-ms-wmd",
".wmf": "application/x-msmetafile",
".wmlscript": "text/vnd.wap.wmlscript",
".wmz": "application/x-ms-wmz",
".wpng": "image/x-up-wpng",
".wps": "application/vnd.ms-works",
".wpt": "x-lml/x-gps",
".wri": "application/x-mswrite",
".wrz": "x-world/x-vrml",
".ws": "text/vnd.wap.wmlscript",
".wsc": "application/vnd.wap.wmlscriptc",
".wv": "video/wavelet",
".wxl": "application/x-wxl",
".x-gzip": "application/x-gzip",
".xaf": "x-world/x-vrml",
".xar": "application/vnd.xara",
".xdm": "application/x-xdma",
".xdma": "application/x-xdma",
".xdw": "application/vnd.fujixerox.docuworks",
".xhtm": "application/xhtml+xml",
".xla": "application/vnd.ms-excel",
".xlc": "application/vnd.ms-excel",
".xll": "application/x-excel",
".xlm": "application/vnd.ms-excel",
".xlt": "application/vnd.ms-excel",
".xlw": "application/vnd.ms-excel",
".xm": "audio/x-mod",
".xmz": "audio/x-mod",
".xof": "x-world/x-vrml",
".xpi": "application/x-xpinstall",
".xsit": "text/xml",
".yz1": "application/x-yz1",
".z": "application/x-compress",
".zac": "application/x-zaurus-zac",
".json": "application/json",
}
// TypeByExtension returns the MIME type associated with the file's extension;
// it is used to get the file's MIME type for the HTTP header Content-Type
func TypeByExtension(filePath string) string {
typ := mime.TypeByExtension(path.Ext(filePath))
if typ == "" {
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
} else {
if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") {
typ = removeCharsetInMimeType(typ)
}
}
return typ
}
// Remove charset from mime type
func removeCharsetInMimeType(typ string) (str string) {
temArr := strings.Split(typ, ";")
var builder strings.Builder
for i, s := range temArr {
tmpStr := strings.Trim(s, " ")
if strings.Contains(tmpStr, "charset=") {
continue
}
if i == 0 {
builder.WriteString(s)
} else {
builder.WriteString("; " + s)
}
}
return builder.String()
}
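// Illustrative sketch (not part of the original file): the standard library's
// answer wins when present, and the table above fills the gaps:
//
//	TypeByExtension("report.pdf") // "application/pdf"
//	TypeByExtension("movie.rmvb") // "audio/x-pn-realaudio" (table fallback)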

@ -1,69 +0,0 @@
package oss
import (
"hash"
"io"
"net/http"
)
// Response defines HTTP response from OSS
type Response struct {
StatusCode int
Headers http.Header
Body io.ReadCloser
ClientCRC uint64
ServerCRC uint64
}
func (r *Response) Read(p []byte) (n int, err error) {
return r.Body.Read(p)
}
// Close closes the HTTP response body
func (r *Response) Close() error {
return r.Body.Close()
}
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
ObjectKey string
Reader io.Reader
}
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
ObjectKey string
}
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
Response *Response
ClientCRC hash.Hash64
ServerCRC uint64
}
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
ObjectKey string
Reader io.Reader
Position int64
}
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
NextPosition int64
CRC uint64
}
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
InitResult *InitiateMultipartUploadResult
Reader io.Reader
PartSize int64
PartNumber int
}
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
Part UploadPart
}

@ -1,474 +0,0 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
)
// CopyFile copies an object with multipart copy
//
// srcBucketName source bucket name
// srcObjectKey source object name
// destObjectKey target object name in the form of bucketname.objectkey
// partSize the part size in bytes.
// options object's constraints. Check out function InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
destBucketName := bucket.BucketName
if partSize < MinPartSize || partSize > MaxPartSize {
return errors.New("oss: part size invalid range (1024KB, 5GB]")
}
cpConf := getCpConfig(options)
routines := getRoutines(options)
var strVersionId string
versionId, _ := FindOption(options, "versionId", nil)
if versionId != nil {
strVersionId = versionId.(string)
}
if cpConf != nil && cpConf.IsEnable {
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
if cpFilePath != "" {
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
}
}
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
partSize, options, routines)
}
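// Illustrative sketch (not part of the original file): a multipart copy across
// buckets in 1MB parts with checkpointing, assuming this package's Routines and
// CheckpointDir options; names and paths are hypothetical:
//
//	err := destBucket.CopyFile("src-bucket", "src-key", "dest-key", 1024*1024,
//		Routines(2), CheckpointDir(true, "/tmp/cp"))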
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
cpFileName := getCpFileName(src, dest, versionId)
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
// ----- Concurrent copy without checkpoint ---------
// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
bucket *Bucket
imur InitiateMultipartUploadResult
srcBucketName string
srcObjectKey string
options []Option
hook copyPartHook
}
// copyPartHook is the hook for testing purposes
type copyPartHook func(part copyPart) error
var copyPartHooker copyPartHook = defaultCopyPartHook
func defaultCopyPartHook(part copyPart) error {
return nil
}
// copyWorker is the worker routine that copies parts
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(chunk); err != nil {
failed <- err
break
}
chunkSize := chunk.End - chunk.Start + 1
part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
chunk.Start, chunkSize, chunk.Number, arg.options...)
if err != nil {
failed <- err
break
}
select {
case <-die:
return
default:
}
results <- part
}
}
// copyScheduler
func copyScheduler(jobs chan copyPart, parts []copyPart) {
for _, part := range parts {
jobs <- part
}
close(jobs)
}
// copyPart structure
type copyPart struct {
Number int // Part number (from 1 to 10,000)
Start int64 // The start index in the source file.
End int64 // The end index in the source file
}
// getCopyParts calculates copy parts
func getCopyParts(objectSize, partSize int64) []copyPart {
parts := []copyPart{}
part := copyPart{}
i := 0
for offset := int64(0); offset < objectSize; offset += partSize {
part.Number = i + 1
part.Start = offset
part.End = GetPartEnd(offset, objectSize, partSize)
parts = append(parts, part)
i++
}
return parts
}
// getSrcObjectBytes gets the source file size
func getSrcObjectBytes(parts []copyPart) int64 {
var ob int64
for _, part := range parts {
ob += (part.End - part.Start + 1)
}
return ob
}
// copyFile is a concurrent copy without checkpoint
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
if err != nil {
return err
}
srcBucket, err := bucket.Client.Bucket(srcBucketName)
if err != nil {
return err
}
listener := GetProgressListener(options)
// Choose the valid options
headerOptions := ChoiceHeadObjectOption(options)
partOptions := ChoiceTransferPartOption(options)
completeOptions := ChoiceCompletePartOption(options)
abortOptions := ChoiceAbortPartOption(options)
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
if err != nil {
return err
}
// Get copy parts
parts := getCopyParts(objectSize, partSize)
// Initialize the multipart upload
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
}
jobs := make(chan copyPart, len(parts))
results := make(chan UploadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getSrcObjectBytes(parts)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the copy workers
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait for the parts to finish.
completed := 0
ups := make([]UploadPart, len(parts))
for completed < len(parts) {
select {
case part := <-results:
completed++
ups[part.PartNumber-1] = part
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
completedBytes += copyBytes
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
descBucket.AbortMultipartUpload(imur, abortOptions...)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
// Complete the multipart upload
_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
if err != nil {
bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
return nil
}
// ----- Concurrent copy with checkpoint -----
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
type copyCheckpoint struct {
Magic string // Magic
MD5 string // CP content MD5
SrcBucketName string // Source bucket
SrcObjectKey string // Source object
DestBucketName string // Target bucket
DestObjectKey string // Target object
CopyID string // Copy ID
ObjStat objectStat // Object stat
Parts []copyPart // Copy parts
CopyParts []UploadPart // The uploaded parts
PartStat []bool // The part status
}
// isValid checks whether the data is valid: the CP is intact and the object has not been updated.
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
// Compare CP's magic number and the MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != copyCpMagic || b64 != cp.MD5 {
return false, nil
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return false, err
}
// Compare the object size and last modified time and etag.
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
return true, nil
}
// load loads from the checkpoint file
func (cp *copyCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// update updates the part's status
func (cp *copyCheckpoint) update(part UploadPart) {
cp.CopyParts[part.PartNumber-1] = part
cp.PartStat[part.PartNumber-1] = true
}
// dump dumps the CP to the file
func (cp *copyCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialization
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns unfinished parts
func (cp copyCheckpoint) todoParts() []copyPart {
dps := []copyPart{}
for i, ps := range cp.PartStat {
if !ps {
dps = append(dps, cp.Parts[i])
}
}
return dps
}
// getCompletedBytes returns finished bytes count
func (cp copyCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for i, part := range cp.Parts {
if cp.PartStat[i] {
completedBytes += (part.End - part.Start + 1)
}
}
return completedBytes
}
// prepare initializes the multipart upload
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
partSize int64, options []Option) error {
// CP
cp.Magic = copyCpMagic
cp.SrcBucketName = srcBucket.BucketName
cp.SrcObjectKey = srcObjectKey
cp.DestBucketName = destBucket.BucketName
cp.DestObjectKey = destObjectKey
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
cp.ObjStat.Size = objectSize
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
// Parts
cp.Parts = getCopyParts(objectSize, partSize)
cp.PartStat = make([]bool, len(cp.Parts))
for i := range cp.PartStat {
cp.PartStat[i] = false
}
cp.CopyParts = make([]UploadPart, len(cp.Parts))
// Init copy
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
if err != nil {
return err
}
cp.CopyID = imur.UploadID
return nil
}
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
Key: cp.DestObjectKey, UploadID: cp.CopyID}
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
if err != nil {
return err
}
os.Remove(cpFilePath)
return err
}
// copyFileWithCp does a concurrent copy with checkpoint
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
partSize int64, options []Option, cpFilePath string, routines int) error {
descBucket, err := bucket.Client.Bucket(destBucketName)
if err != nil {
return err
}
srcBucket, err := bucket.Client.Bucket(srcBucketName)
if err != nil {
return err
}
listener := GetProgressListener(options)
// Load CP data
ccp := copyCheckpoint{}
err = ccp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// choose valid options
headerOptions := ChoiceHeadObjectOption(options)
partOptions := ChoiceTransferPartOption(options)
completeOptions := ChoiceCompletePartOption(options)
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
if err != nil {
return err
}
// Reinitialize if the load failed or the CP data is invalid
valid, err := ccp.isValid(meta)
if err != nil || !valid {
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Unfinished parts
parts := ccp.todoParts()
imur := InitiateMultipartUploadResult{
Bucket: destBucketName,
Key: destObjectKey,
UploadID: ccp.CopyID}
jobs := make(chan copyPart, len(parts))
results := make(chan UploadPart, len(parts))
failed := make(chan error)
die := make(chan bool)
completedBytes := ccp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
// Start the worker coroutines
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
for w := 1; w <= routines; w++ {
go copyWorker(w, arg, jobs, results, failed, die)
}
// Start the scheduler
go copyScheduler(jobs, parts)
// Wait until all parts are completed.
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
ccp.update(part)
ccp.dump(cpFilePath)
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
completedBytes += copyBytes
event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
return err
}
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
publishProgress(listener, event)
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
}

@ -1,305 +0,0 @@
package oss
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"net/url"
"os"
"sort"
"strconv"
)
// InitiateMultipartUpload initializes multipart upload
//
// objectKey object name
// options the constraints of the object to upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
// ServerSideEncryption, Meta, check out the following link:
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for later calls such as UploadPartFromFile, UploadPartCopy.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
var imur InitiateMultipartUploadResult
opts := AddContentType(options, objectKey)
params, _ := GetRawParams(options)
paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
ConvertEmptyValueToNil(params, paramKeys)
params["uploads"] = nil
resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
if err != nil {
return imur, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &imur)
return imur, err
}
// UploadPart uploads parts
//
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
// Thus another part upload with the same part number and upload Id will overwrite the data.
// The minimum part size is 100KB, except for the last part, which is exempt from this minimum.
//
// imur the returned value of InitiateMultipartUpload.
// reader io.Reader the reader for the part's data.
// size the part size.
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
//
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
request := &UploadPartRequest{
InitResult: &imur,
Reader: reader,
PartSize: partSize,
PartNumber: partNumber,
}
result, err := bucket.DoUploadPart(request, options)
return result.Part, err
}
// UploadPartFromFile uploads part from the file.
//
// imur the return value of a successful InitiateMultipartUpload.
// filePath the local file path to upload.
// startPosition the start position in the local file.
// partSize the part size.
// partNumber the part number (from 1 to 10,000)
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var part = UploadPart{}
fd, err := os.Open(filePath)
if err != nil {
return part, err
}
defer fd.Close()
fd.Seek(startPosition, io.SeekStart)
request := &UploadPartRequest{
InitResult: &imur,
Reader: fd,
PartSize: partSize,
PartNumber: partNumber,
}
result, err := bucket.DoUploadPart(request, options)
return result.Part, err
}
// DoUploadPart does the actual part upload.
//
// request part upload request
//
// UploadPartResult the result of uploading part.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
listener := GetProgressListener(options)
options = append(options, ContentLength(request.PartSize))
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(request.PartNumber)
params["uploadId"] = request.InitResult.UploadID
resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
if err != nil {
return &UploadPartResult{}, err
}
defer resp.Body.Close()
part := UploadPart{
ETag: resp.Headers.Get(HTTPHeaderEtag),
PartNumber: request.PartNumber,
}
if bucket.GetConfig().IsEnableCRC {
err = CheckCRC(resp, "DoUploadPart")
if err != nil {
return &UploadPartResult{part}, err
}
}
return &UploadPartResult{part}, nil
}
// UploadPartCopy uploads part copy
//
// imur the return value of InitiateMultipartUpload
// copySrc source Object name
// startPosition the part's start index in the source file
// partSize the part size
// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
// options the constraints of the source object for the copy. The copy happens only when these constraints are met. Otherwise it returns an error.
// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, check out the following link for the detail
// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
var out UploadPartCopyResult
var part UploadPart
var opts []Option
// first find the version id
versionIdKey := "versionId"
versionId, _ := FindOption(options, versionIdKey, nil)
if versionId == nil {
opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
CopySourceRange(startPosition, partSize)}
} else {
opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
CopySourceRange(startPosition, partSize)}
options = DeleteOption(options, versionIdKey)
}
opts = append(opts, options...)
params := map[string]interface{}{}
params["partNumber"] = strconv.Itoa(partNumber)
params["uploadId"] = imur.UploadID
resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
if err != nil {
return part, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return part, err
}
part.ETag = out.ETag
part.PartNumber = partNumber
return part, nil
}
// CompleteMultipartUpload completes the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
var out CompleteMultipartUploadResult
sort.Sort(UploadParts(parts))
cxml := completeMultipartUploadXML{}
cxml.Part = parts
bs, err := xml.Marshal(cxml)
if err != nil {
return out, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
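// exampleMultipartFlow is a minimal sketch (not part of the SDK) tying the
// APIs above together: initiate, upload one in-memory part, then complete,
// aborting on failure so no orphaned upload is left behind. The object key
// and payload are placeholders; real parts (except the last) must be at
// least 100KB.
func exampleMultipartFlow(bucket Bucket) error {
imur, err := bucket.InitiateMultipartUpload("example-object")
if err != nil {
return err
}
payload := []byte("hello multipart upload")
part, err := bucket.UploadPart(imur, bytes.NewReader(payload), int64(len(payload)), 1)
if err != nil {
bucket.AbortMultipartUpload(imur)
return err
}
_, err = bucket.CompleteMultipartUpload(imur, []UploadPart{part})
return err
}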
// AbortMultipartUpload aborts the multipart upload.
//
// imur the return value of InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
params := map[string]interface{}{}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// ListUploadedParts lists the uploaded parts.
//
// imur the return value of InitiateMultipartUpload.
//
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
var out ListUploadedPartsResult
options = append(options, EncodingType("url"))
params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["uploadId"] = imur.UploadID
resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return out, err
}
err = decodeListUploadedPartsResult(&out)
return out, err
}
// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options the filters for listing. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
var out ListMultipartUploadResult
options = append(options, EncodingType("url"))
params, err := GetRawParams(options)
if err != nil {
return out, err
}
params["uploads"] = nil
resp, err := bucket.do("GET", "", params, options, nil, nil)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return out, err
}
err = decodeListMultipartUploadResult(&out)
return out, err
}
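// exampleAbortAllUploads is a minimal sketch (not part of the SDK) that lists
// the ongoing multipart uploads and aborts each one. It assumes each entry in
// Uploads exposes Key and UploadID, per this SDK's type definitions (not part
// of this diff); pagination via KeyMarker/UploadIDMarker is omitted.
func exampleAbortAllUploads(bucket Bucket) error {
lmur, err := bucket.ListMultipartUploads()
if err != nil {
return err
}
for _, upload := range lmur.Uploads {
imur := InitiateMultipartUploadResult{
Bucket: bucket.BucketName,
Key: upload.Key,
UploadID: upload.UploadID,
}
if err = bucket.AbortMultipartUpload(imur); err != nil {
return err
}
}
return nil
}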

@ -1,689 +0,0 @@
package oss
import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
type optionType string
const (
optionParam optionType = "HTTPParameter" // URL parameter
optionHTTP optionType = "HTTPHeader" // HTTP header
optionArg optionType = "FuncArgument" // Function argument
)
const (
deleteObjectsQuiet = "delete-objects-quiet"
routineNum = "x-routine-num"
checkpointConfig = "x-cp-config"
initCRC64 = "init-crc64"
progressListener = "x-progress-listener"
storageClass = "storage-class"
responseHeader = "x-response-header"
redundancyType = "redundancy-type"
objectHashFunc = "object-hash-func"
)
type (
optionValue struct {
Value interface{}
Type optionType
}
// Option HTTP option
Option func(map[string]optionValue) error
)
// ACL is an option to set X-Oss-Acl header
func ACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssACL, string(acl))
}
// ContentType is an option to set Content-Type header
func ContentType(value string) Option {
return setHeader(HTTPHeaderContentType, value)
}
// ContentLength is an option to set Content-Length header
func ContentLength(length int64) Option {
return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}
// CacheControl is an option to set Cache-Control header
func CacheControl(value string) Option {
return setHeader(HTTPHeaderCacheControl, value)
}
// ContentDisposition is an option to set Content-Disposition header
func ContentDisposition(value string) Option {
return setHeader(HTTPHeaderContentDisposition, value)
}
// ContentEncoding is an option to set Content-Encoding header
func ContentEncoding(value string) Option {
return setHeader(HTTPHeaderContentEncoding, value)
}
// ContentLanguage is an option to set Content-Language header
func ContentLanguage(value string) Option {
return setHeader(HTTPHeaderContentLanguage, value)
}
// ContentMD5 is an option to set Content-MD5 header
func ContentMD5(value string) Option {
return setHeader(HTTPHeaderContentMD5, value)
}
// Expires is an option to set Expires header
func Expires(t time.Time) Option {
return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}
// Meta is an option to set Meta header
func Meta(key, value string) Option {
return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}
// Range is an option to set Range header, [start, end]
func Range(start, end int64) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
func NormalizedRange(nr string) Option {
return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}
// AcceptEncoding is an option to set Accept-Encoding header
func AcceptEncoding(value string) Option {
return setHeader(HTTPHeaderAcceptEncoding, value)
}
// IfModifiedSince is an option to set If-Modified-Since header
func IfModifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}
// IfUnmodifiedSince is an option to set If-Unmodified-Since header
func IfUnmodifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}
// IfMatch is an option to set If-Match header
func IfMatch(value string) Option {
return setHeader(HTTPHeaderIfMatch, value)
}
// IfNoneMatch is an option to set IfNoneMatch header
func IfNoneMatch(value string) Option {
return setHeader(HTTPHeaderIfNoneMatch, value)
}
// CopySource is an option to set X-Oss-Copy-Source header
func CopySource(sourceBucket, sourceObject string) Option {
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}
// CopySourceVersion is an option to set X-Oss-Copy-Source header, including the versionId
func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
}
// CopySourceRange is an option to set X-Oss-Copy-Source header
func CopySourceRange(startPosition, partSize int64) Option {
val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
strconv.FormatInt((startPosition+partSize-1), 10)
return setHeader(HTTPHeaderOssCopySourceRange, val)
}
// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
func CopySourceIfMatch(value string) Option {
return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}
// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
func CopySourceIfNoneMatch(value string) Option {
return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}
// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header
func CopySourceIfModifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}
// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
func CopySourceIfUnmodifiedSince(t time.Time) Option {
return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}
// MetadataDirective is an option to set X-Oss-Metadata-Directive header
func MetadataDirective(directive MetadataDirectiveType) Option {
return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}
// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
func ServerSideEncryption(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryption, value)
}
// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
func ServerSideEncryptionKeyID(value string) Option {
return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}
// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header
func ServerSideDataEncryption(value string) Option {
return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
}
// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header
func SSECAlgorithm(value string) Option {
return setHeader(HTTPHeaderSSECAlgorithm, value)
}
// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header
func SSECKey(value string) Option {
return setHeader(HTTPHeaderSSECKey, value)
}
// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header
func SSECKeyMd5(value string) Option {
return setHeader(HTTPHeaderSSECKeyMd5, value)
}
// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
return setHeader(HTTPHeaderOssObjectACL, string(acl))
}
// symlinkTarget is an option to set X-Oss-Symlink-Target
func symlinkTarget(targetObjectKey string) Option {
return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}
// Origin is an option to set Origin header
func Origin(value string) Option {
return setHeader(HTTPHeaderOrigin, value)
}
// ObjectStorageClass is an option to set the storage class of object
func ObjectStorageClass(storageClass StorageClassType) Option {
return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
}
// Callback is an option to set callback values
func Callback(callback string) Option {
return setHeader(HTTPHeaderOssCallback, callback)
}
// CallbackVar is an option to set callback user defined values
func CallbackVar(callbackVar string) Option {
return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
}
// RequestPayer is an option to set the payer who pays for the request
func RequestPayer(payerType PayerType) Option {
return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
}
// RequestPayerParam is an option to set the payer who pays for the request
func RequestPayerParam(payerType PayerType) Option {
return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
}
// SetTagging is an option to set object tagging
func SetTagging(tagging Tagging) Option {
if len(tagging.Tags) == 0 {
return nil
}
taggingValue := ""
for index, tag := range tagging.Tags {
if index != 0 {
taggingValue += "&"
}
taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
}
return setHeader(HTTPHeaderOssTagging, taggingValue)
}
// TaggingDirective is an option to set X-Oss-Tagging-Directive header
func TaggingDirective(directive TaggingDirectiveType) Option {
return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
}
// ACReqMethod is an option to set Access-Control-Request-Method header
func ACReqMethod(value string) Option {
return setHeader(HTTPHeaderACReqMethod, value)
}
// ACReqHeaders is an option to set Access-Control-Request-Headers header
func ACReqHeaders(value string) Option {
return setHeader(HTTPHeaderACReqHeaders, value)
}
// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit
func TrafficLimitHeader(value int64) Option {
return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
}
// UserAgentHeader is an option to set HTTPHeaderUserAgent
func UserAgentHeader(ua string) Option {
return setHeader(HTTPHeaderUserAgent, ua)
}
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
func ForbidOverWrite(forbidWrite bool) Option {
if forbidWrite {
return setHeader(HTTPHeaderOssForbidOverWrite, "true")
} else {
return setHeader(HTTPHeaderOssForbidOverWrite, "false")
}
}
// RangeBehavior is an option to set X-Oss-Range-Behavior header, such as "standard"
func RangeBehavior(value string) Option {
return setHeader(HTTPHeaderOssRangeBehavior, value)
}
func PartHashCtxHeader(value string) Option {
return setHeader(HTTPHeaderOssHashCtx, value)
}
func PartMd5CtxHeader(value string) Option {
return setHeader(HTTPHeaderOssMd5Ctx, value)
}
func PartHashCtxParam(value string) Option {
return addParam("x-oss-hash-ctx", value)
}
func PartMd5CtxParam(value string) Option {
return addParam("x-oss-md5-ctx", value)
}
// Delimiter is an option to set delimiter parameter
func Delimiter(value string) Option {
return addParam("delimiter", value)
}
// Marker is an option to set marker parameter
func Marker(value string) Option {
return addParam("marker", value)
}
// MaxKeys is an option to set maxkeys parameter
func MaxKeys(value int) Option {
return addParam("max-keys", strconv.Itoa(value))
}
// Prefix is an option to set prefix parameter
func Prefix(value string) Option {
return addParam("prefix", value)
}
// EncodingType is an option to set encoding-type parameter
func EncodingType(value string) Option {
return addParam("encoding-type", value)
}
// MaxUploads is an option to set max-uploads parameter
func MaxUploads(value int) Option {
return addParam("max-uploads", strconv.Itoa(value))
}
// KeyMarker is an option to set key-marker parameter
func KeyMarker(value string) Option {
return addParam("key-marker", value)
}
// VersionIdMarker is an option to set version-id-marker parameter
func VersionIdMarker(value string) Option {
return addParam("version-id-marker", value)
}
// VersionId is an option to set versionId parameter
func VersionId(value string) Option {
return addParam("versionId", value)
}
// TagKey is an option to set tag key parameter
func TagKey(value string) Option {
return addParam("tag-key", value)
}
// TagValue is an option to set tag value parameter
func TagValue(value string) Option {
return addParam("tag-value", value)
}
// UploadIDMarker is an option to set upload-id-marker parameter
func UploadIDMarker(value string) Option {
return addParam("upload-id-marker", value)
}
// MaxParts is an option to set max-parts parameter
func MaxParts(value int) Option {
return addParam("max-parts", strconv.Itoa(value))
}
// PartNumberMarker is an option to set part-number-marker parameter
func PartNumberMarker(value int) Option {
return addParam("part-number-marker", strconv.Itoa(value))
}
// Sequential is an option to set sequential parameter for InitiateMultipartUpload
func Sequential() Option {
return addParam("sequential", "")
}
// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload
func WithHashContext() Option {
return addParam("withHashContext", "")
}
// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload
func EnableMd5() Option {
return addParam("x-oss-enable-md5", "")
}
// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload
func EnableSha1() Option {
return addParam("x-oss-enable-sha1", "")
}
// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload
func EnableSha256() Option {
return addParam("x-oss-enable-sha256", "")
}
// ListType is an option to set List-type parameter for ListObjectsV2
func ListType(value int) Option {
return addParam("list-type", strconv.Itoa(value))
}
// StartAfter is an option to set start-after parameter for ListObjectsV2
func StartAfter(value string) Option {
return addParam("start-after", value)
}
// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
func ContinuationToken(value string) Option {
if value == "" {
return addParam("continuation-token", nil)
}
return addParam("continuation-token", value)
}
// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
func FetchOwner(value bool) Option {
if value {
return addParam("fetch-owner", "true")
}
return addParam("fetch-owner", "false")
}
// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
return addArg(deleteObjectsQuiet, isQuiet)
}
// StorageClass bucket storage class
func StorageClass(value StorageClassType) Option {
return addArg(storageClass, value)
}
// RedundancyType bucket data redundancy type
func RedundancyType(value DataRedundancyType) Option {
return addArg(redundancyType, value)
}
// ObjectHashFunc sets the hash function used for the object
func ObjectHashFunc(value ObjecthashFuncType) Option {
return addArg(objectHashFunc, value)
}
// Checkpoint configuration
type cpConfig struct {
IsEnable bool
FilePath string
DirPath string
}
// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
}
// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
func CheckpointDir(isEnable bool, dirPath string) Option {
return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
}
// Routines DownloadFile/UploadFile routine count
func Routines(n int) Option {
return addArg(routineNum, n)
}
// InitCRC sets the initial CRC for AppendObject
func InitCRC(initCRC uint64) Option {
return addArg(initCRC64, initCRC)
}
// Progress sets the progress listener
func Progress(listener ProgressListener) Option {
return addArg(progressListener, listener)
}
// GetResponseHeader is an option to get the response http header
func GetResponseHeader(respHeader *http.Header) Option {
return addArg(responseHeader, respHeader)
}
// ResponseContentType is an option to set response-content-type param
func ResponseContentType(value string) Option {
return addParam("response-content-type", value)
}
// ResponseContentLanguage is an option to set response-content-language param
func ResponseContentLanguage(value string) Option {
return addParam("response-content-language", value)
}
// ResponseExpires is an option to set response-expires param
func ResponseExpires(value string) Option {
return addParam("response-expires", value)
}
// ResponseCacheControl is an option to set response-cache-control param
func ResponseCacheControl(value string) Option {
return addParam("response-cache-control", value)
}
// ResponseContentDisposition is an option to set response-content-disposition param
func ResponseContentDisposition(value string) Option {
return addParam("response-content-disposition", value)
}
// ResponseContentEncoding is an option to set response-content-encoding param
func ResponseContentEncoding(value string) Option {
return addParam("response-content-encoding", value)
}
// Process is an option to set x-oss-process param
func Process(value string) Option {
return addParam("x-oss-process", value)
}
// TrafficLimitParam is an option to set x-oss-traffic-limit
func TrafficLimitParam(value int64) Option {
return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
}
// SetHeader allows users to set personalized http headers
func SetHeader(key string, value interface{}) Option {
return setHeader(key, value)
}
// AddParam allows users to set personalized http params
func AddParam(key string, value interface{}) Option {
return addParam(key, value)
}
func setHeader(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionHTTP}
return nil
}
}
func addParam(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionParam}
return nil
}
}
func addArg(key string, value interface{}) Option {
return func(params map[string]optionValue) error {
if value == nil {
return nil
}
params[key] = optionValue{value, optionArg}
return nil
}
}
func handleOptions(headers map[string]string, options []Option) error {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return err
}
}
}
for k, v := range params {
if v.Type == optionHTTP {
headers[k] = v.Value.(string)
}
}
return nil
}
func GetRawParams(options []Option) (map[string]interface{}, error) {
// Option
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return nil, err
}
}
}
paramsm := map[string]interface{}{}
// Serialize
for k, v := range params {
if v.Type == optionParam {
vs := params[k]
paramsm[k] = vs.Value.(string)
}
}
return paramsm, nil
}
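// exampleOptionComposition is a minimal sketch (not part of the SDK) showing
// how one option list collapses into an HTTP header map via handleOptions and
// into a query parameter map via GetRawParams; the values are placeholders.
func exampleOptionComposition() (map[string]string, map[string]interface{}, error) {
opts := []Option{
ContentType("text/plain"),
Meta("owner", "demo"), // header: X-Oss-Meta-Owner
VersionId("CAEQExample"), // query parameter: versionId
}
headers := map[string]string{}
if err := handleOptions(headers, opts); err != nil {
return nil, nil, err
}
params, err := GetRawParams(opts)
return headers, params, err
}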
func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
if err := option(params); err != nil {
return nil, err
}
}
}
if val, ok := params[param]; ok {
return val.Value, nil
}
return defaultVal, nil
}
func IsOptionSet(options []Option, option string) (bool, interface{}, error) {
params := map[string]optionValue{}
for _, opt := range options {
if opt != nil {
if err := opt(params); err != nil {
return false, nil, err
}
}
}
if val, ok := params[option]; ok {
return true, val.Value, nil
}
return false, nil, nil
}
func DeleteOption(options []Option, strKey string) []Option {
var outOption []Option
params := map[string]optionValue{}
for _, option := range options {
if option != nil {
option(params)
_, exist := params[strKey]
if !exist {
outOption = append(outOption, option)
} else {
delete(params, strKey)
}
}
}
return outOption
}
func GetRequestId(header http.Header) string {
return header.Get("x-oss-request-id")
}
func GetVersionId(header http.Header) string {
return header.Get("x-oss-version-id")
}
func GetCopySrcVersionId(header http.Header) string {
return header.Get("x-oss-copy-source-version-id")
}
func GetDeleteMark(header http.Header) bool {
value := header.Get("x-oss-delete-marker")
if strings.ToUpper(value) == "TRUE" {
return true
}
return false
}
func GetQosDelayTime(header http.Header) string {
return header.Get("x-oss-qos-delay-time")
}
// AllowSameActionOverLap is an option to set the HTTPHeaderAllowSameActionOverLap header
func AllowSameActionOverLap(enabled bool) Option {
if enabled {
return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
} else {
return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
}
}

@ -1,116 +0,0 @@
package oss
import (
"io"
)
// ProgressEventType defines transfer progress event type
type ProgressEventType int
const (
// TransferStartedEvent transfer started, set TotalBytes
TransferStartedEvent ProgressEventType = 1 + iota
// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
TransferDataEvent
// TransferCompletedEvent transfer completed
TransferCompletedEvent
// TransferFailedEvent transfer encounters an error
TransferFailedEvent
)
// ProgressEvent defines progress event
type ProgressEvent struct {
ConsumedBytes int64
TotalBytes int64
RwBytes int64
EventType ProgressEventType
}
// ProgressListener listens for progress changes
type ProgressListener interface {
ProgressChanged(event *ProgressEvent)
}
// -------------------- Private --------------------
func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
return &ProgressEvent{
ConsumedBytes: consumed,
TotalBytes: total,
RwBytes: rwBytes,
EventType: eventType}
}
// publishProgress
func publishProgress(listener ProgressListener, event *ProgressEvent) {
if listener != nil && event != nil {
listener.ProgressChanged(event)
}
}
type readerTracker struct {
completedBytes int64
}
type teeReader struct {
reader io.Reader
writer io.Writer
listener ProgressListener
consumedBytes int64
totalBytes int64
tracker *readerTracker
}
// TeeReader returns a Reader that writes to w what it reads from r.
// All reads from r performed through it are matched with
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
return &teeReader{
reader: reader,
writer: writer,
listener: listener,
consumedBytes: 0,
totalBytes: totalBytes,
tracker: tracker,
}
}
func (t *teeReader) Read(p []byte) (n int, err error) {
n, err = t.reader.Read(p)
// Read encountered error
if err != nil && err != io.EOF {
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
publishProgress(t.listener, event)
}
if n > 0 {
t.consumedBytes += int64(n)
// CRC
if t.writer != nil {
if n, err := t.writer.Write(p[:n]); err != nil {
return n, err
}
}
// Progress
if t.listener != nil {
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
publishProgress(t.listener, event)
}
// Track
if t.tracker != nil {
t.tracker.completedBytes = t.consumedBytes
}
}
return
}
func (t *teeReader) Close() error {
if rc, ok := t.reader.(io.ReadCloser); ok {
return rc.Close()
}
return nil
}
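// examplePercentListener is a minimal sketch (not part of the SDK) of a
// ProgressListener that tracks the completion percentage; wire it in via the
// Progress(...) option or directly through TeeReader as below.
type examplePercentListener struct {
percent int64
}
// ProgressChanged records the completion percentage from each event.
func (l *examplePercentListener) ProgressChanged(event *ProgressEvent) {
if event.TotalBytes > 0 {
l.percent = event.ConsumedBytes * 100 / event.TotalBytes
}
}
// exampleTrackedReader wraps r so that every Read publishes progress to l;
// the CRC writer argument is nil because only progress is of interest here.
func exampleTrackedReader(r io.Reader, total int64, l *examplePercentListener) io.ReadCloser {
return TeeReader(r, nil, total, l, nil)
}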

@ -1,12 +0,0 @@
//go:build !go1.7
// +build !go1.7
package oss
import "net/http"
// http.ErrUseLastResponse is only defined from Go 1.7 onward
func disableHTTPRedirect(client *http.Client) {
}

@ -1,13 +0,0 @@
//go:build go1.7
// +build go1.7
package oss
import "net/http"
// http.ErrUseLastResponse is only defined from Go 1.7 onward
func disableHTTPRedirect(client *http.Client) {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
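// exampleNoRedirectClient is a minimal sketch (not part of the SDK): after
// disableHTTPRedirect, the client hands 3xx responses back to the caller
// instead of following them.
func exampleNoRedirectClient() *http.Client {
client := &http.Client{}
disableHTTPRedirect(client)
return client
}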

@ -1,197 +0,0 @@
package oss
import (
"bytes"
"encoding/xml"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
)
// CreateSelectCsvObjectMeta creates the csv object meta
//
// key the object key.
// csvMeta the csv file meta
// options the options for creating the csv meta of the object.
//
// MetaEndFrameCSV the csv file meta info
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
var endFrame MetaEndFrameCSV
params := map[string]interface{}{}
params["x-oss-process"] = "csv/meta"
csvMeta.encodeBase64()
bs, err := xml.Marshal(csvMeta)
if err != nil {
return endFrame, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
if err != nil {
return endFrame, err
}
defer resp.Body.Close()
_, err = ioutil.ReadAll(resp)
return resp.Frame.MetaEndFrameCSV, err
}
// CreateSelectJsonObjectMeta creates the json object meta
//
// key the object key.
// jsonMeta the json file meta
// options the options for creating the json meta of the object.
//
// MetaEndFrameJSON the json file meta info
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
var endFrame MetaEndFrameJSON
params := map[string]interface{}{}
params["x-oss-process"] = "json/meta"
bs, err := xml.Marshal(jsonMeta)
if err != nil {
return endFrame, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
if err != nil {
return endFrame, err
}
defer resp.Body.Close()
_, err = ioutil.ReadAll(resp)
return resp.Frame.MetaEndFrameJSON, err
}
// SelectObject is the select object api; it supports csv and json files.
//
// key the object key.
// selectReq the request data for select object
// options the options for select file of the object.
//
// io.ReadCloser the reader instance for reading data from the response. Close() must be called after usage, and it is only valid when error is nil.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
params := map[string]interface{}{}
if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
params["x-oss-process"] = "csv/select" // default select csv file
} else {
params["x-oss-process"] = "json/select"
}
selectReq.encodeBase64()
bs, err := xml.Marshal(selectReq)
if err != nil {
return nil, err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
if err != nil {
return nil, err
}
if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
resp.Frame.EnablePayloadCrc = true
}
resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"
return resp, err
}
// DoPostSelectObject is the SelectObject/CreateMeta api; it supports csv and json files.
//
// key the object key.
// params the x-oss-process resource: csv/meta, json/meta, csv/select or json/select.
// buf the request data as a buffer.
// options the options for select file of the object.
//
// SelectObjectResponse the response of select object.
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
resp, err := bucket.do("POST", key, params, options, buf, nil)
if err != nil {
return nil, err
}
result := &SelectObjectResponse{
Body: resp.Body,
StatusCode: resp.StatusCode,
Frame: SelectObjectResult{},
}
result.Headers = resp.Headers
result.ReadTimeOut = bucket.GetConfig().Timeout
// Progress
listener := GetProgressListener(options)
// CRC32
crcCalc := crc32.NewIEEE()
result.WriterForCheckCrc32 = crcCalc
result.Body = TeeReader(resp.Body, nil, 0, listener, nil)
err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})
return result, err
}
// SelectObjectIntoFile is the selectObject to file api
//
// key the object key.
// fileName the name of the local file to save to.
// selectReq the request data for select object
// options the options for select file of the object.
//
// error it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
tempFilePath := fileName + TempFileSuffix
params := map[string]interface{}{}
if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
params["x-oss-process"] = "csv/select" // default select csv file
} else {
params["x-oss-process"] = "json/select"
}
selectReq.encodeBase64()
bs, err := xml.Marshal(selectReq)
if err != nil {
return err
}
buffer := new(bytes.Buffer)
buffer.Write(bs)
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
if err != nil {
return err
}
defer resp.Close()
// If the local file does not exist, create a new one. If it exists, overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
if err != nil {
return err
}
// Copy the data to the local file path.
_, err = io.Copy(fd, resp)
fd.Close()
if err != nil {
return err
}
return os.Rename(tempFilePath, fileName)
}
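// exampleSelectCsv is a minimal sketch (not part of the SDK) running a select
// expression against a csv object and draining the decoded result. It assumes
// SelectRequest exposes an Expression field, per this SDK's type definitions
// (not part of this diff); the key and the SQL are placeholders.
func exampleSelectCsv(bucket Bucket) error {
selectReq := SelectRequest{Expression: "select * from ossobject"}
body, err := bucket.SelectObject("example.csv", selectReq)
if err != nil {
return err
}
defer body.Close()
_, err = ioutil.ReadAll(body) // frames are decoded transparently by Read
return err
}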

@ -1,365 +0,0 @@
package oss
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"io"
"net/http"
"time"
)
// The adapter class for Select object's response.
// The response consists of frames. Each frame has the following format:
// Type | Payload Length | Header Checksum | Payload | Payload Checksum
// <-4 bytes-><---4 bytes----><----4 bytes----><---n/a---><-----4 bytes----->
// And we have three kinds of frames.
// Data Frame:
// Type: 8388609
// Payload: Offset | Data
//          <-8 bytes->
// Continuous Frame:
// Type: 8388612
// Payload: Offset (8 bytes)
// End Frame:
// Type: 8388613
// Payload: Offset | total scanned bytes | http status code | error message
//          <-8 bytes--><-----8 bytes------><----4 bytes----><---variable--->
// SelectObjectResponse defines HTTP response from OSS SelectObject
type SelectObjectResponse struct {
StatusCode int
Headers http.Header
Body io.ReadCloser
Frame SelectObjectResult
ReadTimeOut uint
ClientCRC32 uint32
ServerCRC32 uint32
WriterForCheckCrc32 hash.Hash32
Finish bool
}
func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
n, err = sr.readFrames(p)
return
}
// Close closes the http response body
func (sr *SelectObjectResponse) Close() error {
return sr.Body.Close()
}
// PostSelectResult wraps the response of SelectObject
type PostSelectResult struct {
Response *SelectObjectResponse
}
// readFrames reads frames from the response body
func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
var nn int
var err error
var checkValid bool
if sr.Frame.OutputRawData == true {
nn, err = sr.Body.Read(p)
return nn, err
}
if sr.Finish {
return 0, io.EOF
}
for {
// if this frame's header has already been read, do not read it again
if sr.Frame.OpenLine != true {
err = sr.analysisHeader()
if err != nil {
return nn, err
}
}
if sr.Frame.FrameType == DataFrameType {
n, err := sr.analysisData(p[nn:])
if err != nil {
return nn, err
}
nn += n
// if all data of this frame has been read, empty the frame so the next one can be read
if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
checkValid, err = sr.checkPayloadSum()
if err != nil || !checkValid {
return nn, err
}
sr.emptyFrame()
}
if nn == len(p) {
return nn, nil
}
} else if sr.Frame.FrameType == ContinuousFrameType {
checkValid, err = sr.checkPayloadSum()
if err != nil || !checkValid {
return nn, err
}
sr.Frame.OpenLine = false
} else if sr.Frame.FrameType == EndFrameType {
err = sr.analysisEndFrame()
if err != nil {
return nn, err
}
checkValid, err = sr.checkPayloadSum()
if checkValid {
sr.Finish = true
}
return nn, err
} else if sr.Frame.FrameType == MetaEndFrameCSVType {
err = sr.analysisMetaEndFrameCSV()
if err != nil {
return nn, err
}
checkValid, err = sr.checkPayloadSum()
if checkValid {
sr.Finish = true
}
return nn, err
} else if sr.Frame.FrameType == MetaEndFrameJSONType {
err = sr.analysisMetaEndFrameJSON()
if err != nil {
return nn, err
}
checkValid, err = sr.checkPayloadSum()
if checkValid {
sr.Finish = true
}
return nn, err
}
}
return nn, nil
}
type chanReadIO struct {
readLen int
err error
}
func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
r := sr.Body
// Buffered so the reader goroutine can always complete its send, even after
// a timeout return below; the channel is intentionally not closed, which
// avoids a send-on-closed-channel panic and leaves cleanup to the GC.
ch := make(chan chanReadIO, 1)
go func(p []byte) {
var needReadLength int
readChan := chanReadIO{}
needReadLength = len(p)
for {
n, err := r.Read(p[readChan.readLen:needReadLength])
readChan.readLen += n
if err != nil {
readChan.err = err
ch <- readChan
return
}
if readChan.readLen == needReadLength {
break
}
}
ch <- readChan
}(p)
select {
case <-time.After(time.Second * timeOut):
return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
case result := <-ch:
return result.readLen, result.err
}
}
// analysisHeader reads the header of the selectObject response body
func (sr *SelectObjectResponse) analysisHeader() error {
headFrameByte := make([]byte, 20)
_, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
if err != nil {
return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
}
frameTypeByte := headFrameByte[0:4]
sr.Frame.Version = frameTypeByte[0]
frameTypeByte[0] = 0
bytesToInt(frameTypeByte, &sr.Frame.FrameType)
if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
}
payloadLengthByte := headFrameByte[4:8]
bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
headCheckSumByte := headFrameByte[8:12]
bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
byteOffset := headFrameByte[12:20]
bytesToInt(byteOffset, &sr.Frame.Offset)
sr.Frame.OpenLine = true
err = sr.writerCheckCrc32(byteOffset)
return err
}
// analysisData reads the DataFrameType data of the selectObject response body
func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
var needReadLength int32
lenP := int32(len(p))
restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
if lenP <= restByteLength {
needReadLength = lenP
} else {
needReadLength = restByteLength
}
n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
if err != nil {
return n, fmt.Errorf("read frame data error,%s", err.Error())
}
sr.Frame.ConsumedBytesLength += int32(n)
err = sr.writerCheckCrc32(p[:n])
return n, err
}
// analysisEndFrame reads the EndFrameType data of the selectObject response body
func (sr *SelectObjectResponse) analysisEndFrame() error {
var eF EndFrame
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
if err != nil {
return fmt.Errorf("read end frame error:%s", err.Error())
}
bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
errMsgLength := sr.Frame.PayloadLength - 20
eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
err = sr.writerCheckCrc32(payLoadBytes)
return err
}
// analysisMetaEndFrameCSV reads the MetaEndFrameCSVType data of the selectObject response body
func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
var mCF MetaEndFrameCSV
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
if err != nil {
return fmt.Errorf("read meta end csv frame error:%s", err.Error())
}
bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
bytesToInt(payLoadBytes[8:12], &mCF.Status)
bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
errMsgLength := sr.Frame.PayloadLength - 36
mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
sr.Frame.MetaEndFrameCSV.Status = mCF.Status
sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
err = sr.writerCheckCrc32(payLoadBytes)
return err
}
// analysisMetaEndFrameJSON reads the MetaEndFrameJSONType data of the selectObject response body
func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
var mJF MetaEndFrameJSON
payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
if err != nil {
return fmt.Errorf("read meta end json frame error:%s", err.Error())
}
bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
bytesToInt(payLoadBytes[8:12], &mJF.Status)
bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
errMsgLength := sr.Frame.PayloadLength - 32
mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
sr.Frame.MetaEndFrameJSON.Status = mJF.Status
sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount
err = sr.writerCheckCrc32(payLoadBytes)
return err
}
func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
payLoadChecksumByte := make([]byte, 4)
n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
if n == 4 {
bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
sr.ServerCRC32 = sr.Frame.PayloadChecksum
sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
}
return true, err
}
return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
}
func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
err = nil
if sr.Frame.EnablePayloadCrc == true {
_, err = sr.WriterForCheckCrc32.Write(p)
}
return err
}
// emptyFrame resets the SelectObjectResponse frame information
func (sr *SelectObjectResponse) emptyFrame() {
crcCalc := crc32.NewIEEE()
sr.WriterForCheckCrc32 = crcCalc
sr.Finish = false
sr.Frame.ConsumedBytesLength = 0
sr.Frame.OpenLine = false
sr.Frame.Version = byte(0)
sr.Frame.FrameType = 0
sr.Frame.PayloadLength = 0
sr.Frame.HeaderCheckSum = 0
sr.Frame.Offset = 0
sr.Frame.Data = ""
sr.Frame.EndFrame.TotalScanned = 0
sr.Frame.EndFrame.HTTPStatusCode = 0
sr.Frame.EndFrame.ErrorMsg = ""
sr.Frame.MetaEndFrameCSV.TotalScanned = 0
sr.Frame.MetaEndFrameCSV.Status = 0
sr.Frame.MetaEndFrameCSV.SplitsCount = 0
sr.Frame.MetaEndFrameCSV.RowsCount = 0
sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
sr.Frame.MetaEndFrameCSV.ErrorMsg = ""
sr.Frame.MetaEndFrameJSON.TotalScanned = 0
sr.Frame.MetaEndFrameJSON.Status = 0
sr.Frame.MetaEndFrameJSON.SplitsCount = 0
sr.Frame.MetaEndFrameJSON.RowsCount = 0
sr.Frame.MetaEndFrameJSON.ErrorMsg = ""
sr.Frame.PayloadChecksum = 0
}
// bytesToInt converts a big-endian byte slice into the given integer
func bytesToInt(b []byte, ret interface{}) {
binBuf := bytes.NewBuffer(b)
binary.Read(binBuf, binary.BigEndian, ret)
}
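// exampleFrameType is a minimal sketch (not part of the SDK) of the header
// decoding done in analysisHeader above: the first byte of the type field
// carries the version, so it is masked off before the big-endian conversion.
func exampleFrameType(headFrame []byte) int32 {
b := make([]byte, 4)
copy(b, headFrame[0:4])
b[0] = 0 // drop the version byte
var frameType int32
bytesToInt(b, &frameType)
return frameType
}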

@ -1,42 +0,0 @@
//go:build !go1.7
// +build !go1.7
package oss
import (
"crypto/tls"
"net"
"net/http"
"time"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
httpMaxConns := conn.config.HTTPMaxConns
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
d := net.Dialer{
Timeout: httpTimeOut.ConnectTimeout,
KeepAlive: 30 * time.Second,
}
if config.LocalAddr != nil {
d.LocalAddr = config.LocalAddr
}
conn, err := d.Dial(netw, addr)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
if config.InsecureSkipVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
return transport
}

@ -1,45 +0,0 @@
//go:build go1.7
// +build go1.7
package oss
import (
"crypto/tls"
"net"
"net/http"
"time"
)
func newTransport(conn *Conn, config *Config) *http.Transport {
httpTimeOut := conn.config.HTTPTimeout
httpMaxConns := conn.config.HTTPMaxConns
// New Transport
transport := &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
d := net.Dialer{
Timeout: httpTimeOut.ConnectTimeout,
KeepAlive: 30 * time.Second,
}
if config.LocalAddr != nil {
d.LocalAddr = config.LocalAddr
}
conn, err := d.Dial(netw, addr)
if err != nil {
return nil, err
}
return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
},
MaxIdleConns: httpMaxConns.MaxIdleConns,
MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost,
MaxConnsPerHost: httpMaxConns.MaxConnsPerHost,
IdleConnTimeout: httpTimeOut.IdleConnTimeout,
ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
}
if config.InsecureSkipVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
return transport
}

File diff suppressed because it is too large

@ -1,552 +0,0 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
)
// UploadFile is multipart file upload.
//
// objectKey the object name.
// filePath the local file path to upload.
// partSize the part size in byte.
// options the options for uploading object.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
if partSize < MinPartSize || partSize > MaxPartSize {
return errors.New("oss: part size invalid range (100KB, 5GB]")
}
cpConf := getCpConfig(options)
routines := getRoutines(options)
if cpConf != nil && cpConf.IsEnable {
cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
if cpFilePath != "" {
return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
}
}
return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
if cpConf.FilePath == "" && cpConf.DirPath != "" {
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
absPath, _ := filepath.Abs(srcFile)
cpFileName := getCpFileName(absPath, dest, "")
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
}
return cpConf.FilePath
}
// ----- concurrent upload without checkpoint -----
// getCpConfig gets checkpoint configuration
func getCpConfig(options []Option) *cpConfig {
cpcOpt, err := FindOption(options, checkpointConfig, nil)
if err != nil || cpcOpt == nil {
return nil
}
return cpcOpt.(*cpConfig)
}
// getCpFileName returns the name of the checkpoint file
func getCpFileName(src, dest, versionId string) string {
md5Ctx := md5.New()
md5Ctx.Write([]byte(src))
srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
md5Ctx.Reset()
md5Ctx.Write([]byte(dest))
destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
if versionId == "" {
return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
}
md5Ctx.Reset()
md5Ctx.Write([]byte(versionId))
versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
}
// getRoutines gets the routine count. By default it's 1.
func getRoutines(options []Option) int {
rtnOpt, err := FindOption(options, routineNum, nil)
if err != nil || rtnOpt == nil {
return 1
}
rs := rtnOpt.(int)
if rs < 1 {
rs = 1
} else if rs > 100 {
rs = 100
}
return rs
}
// getPayer returns the payer of the request
func getPayer(options []Option) string {
payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
if err != nil || payerOpt == nil {
return ""
}
return payerOpt.(string)
}
// GetProgressListener gets the progress callback
func GetProgressListener(options []Option) ProgressListener {
isSet, listener, _ := IsOptionSet(options, progressListener)
if !isSet {
return nil
}
return listener.(ProgressListener)
}
// uploadPartHook is for testing usage
type uploadPartHook func(id int, chunk FileChunk) error
var uploadPartHooker uploadPartHook = defaultUploadPart
func defaultUploadPart(id int, chunk FileChunk) error {
return nil
}
// workerArg defines worker argument structure
type workerArg struct {
bucket *Bucket
filePath string
imur InitiateMultipartUploadResult
options []Option
hook uploadPartHook
}
// defaultUploadProgressListener is a no-op listener; workers install it so
// byte-level progress from UploadPartFromFile is swallowed and the dispatch
// loop can publish progress once per completed part instead.
type defaultUploadProgressListener struct {
}
// ProgressChanged is a no-op.
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// worker is the worker goroutine function
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
for chunk := range jobs {
if err := arg.hook(id, chunk); err != nil {
failed <- err
break
}
var respHeader http.Header
p := Progress(&defaultUploadProgressListener{})
opts := make([]Option, 0, len(arg.options)+2) // capacity only; a non-zero length would prepend nil options
opts = append(opts, arg.options...)
// use defaultUploadProgressListener
opts = append(opts, p, GetResponseHeader(&respHeader))
startT := time.Now().Unix() // seconds
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
endT := time.Now().Unix() // seconds
if err != nil {
arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
failed <- err
break
}
select {
case <-die:
return
default:
}
results <- part
}
}
// scheduler feeds all chunks into the jobs channel, then closes it
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
for _, chunk := range chunks {
jobs <- chunk
}
close(jobs)
}
func getTotalBytes(chunks []FileChunk) int64 {
var tb int64
for _, chunk := range chunks {
tb += chunk.Size
}
return tb
}
// uploadFile is a concurrent upload, without checkpoint
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
listener := GetProgressListener(options)
chunks, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
}
partOptions := ChoiceTransferPartOption(options)
completeOptions := ChoiceCompletePartOption(options)
abortOptions := ChoiceAbortPartOption(options)
// Initialize the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
}
jobs := make(chan FileChunk, len(chunks))
results := make(chan UploadPart, len(chunks))
failed := make(chan error)
die := make(chan bool)
var completedBytes int64
totalBytes := getTotalBytes(chunks)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the worker goroutines
arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Schedule the jobs
go scheduler(jobs, chunks)
// Wait for the uploads to finish
completed := 0
parts := make([]UploadPart, len(chunks))
for completed < len(chunks) {
select {
case part := <-results:
completed++
parts[part.PartNumber-1] = part
completedBytes += chunks[part.PartNumber-1].Size
// Per-read progress has already been reported through teeReader.Read();
// this event carries the size of the part that just completed.
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
if completed >= len(chunks) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
// Complete the multipart upload
_, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
if err != nil {
bucket.AbortMultipartUpload(imur, abortOptions...)
return err
}
return nil
}
// ----- concurrent upload with checkpoint -----
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
type uploadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint file content's MD5
FilePath string // Local file path
FileStat cpStat // File state
ObjectKey string // Key
UploadID string // Upload ID
Parts []cpPart // All parts of the local file
}
type cpStat struct {
Size int64 // File size
LastModified time.Time // File's last modified time
MD5 string // Local file's MD5
}
type cpPart struct {
Chunk FileChunk // File chunk
Part UploadPart // Uploaded part
IsCompleted bool // Upload complete flag
}
// isValid checks whether the checkpoint data is valid: it is valid when the local file has not changed and the checkpoint's self-checksum matches.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
// Compare the CP's magic number and MD5.
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
return false, nil
}
// Check whether the local file has been modified.
fd, err := os.Open(filePath)
if err != nil {
return false, err
}
defer fd.Close()
st, err := fd.Stat()
if err != nil {
return false, err
}
md, err := calcFileMD5(filePath)
if err != nil {
return false, err
}
// Compare the file size, file's last modified time and file's MD5
if cp.FileStat.Size != st.Size() ||
!cp.FileStat.LastModified.Equal(st.ModTime()) ||
cp.FileStat.MD5 != md {
return false, nil
}
return true, nil
}
// load loads from the file
func (cp *uploadCheckpoint) load(filePath string) error {
contents, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
err = json.Unmarshal(contents, cp)
return err
}
// dump dumps to the local file
func (cp *uploadCheckpoint) dump(filePath string) error {
bcp := *cp
// Calculate MD5
bcp.MD5 = ""
js, err := json.Marshal(bcp)
if err != nil {
return err
}
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
bcp.MD5 = b64
// Serialization
js, err = json.Marshal(bcp)
if err != nil {
return err
}
// Dump
return ioutil.WriteFile(filePath, js, FilePermMode)
}
// updatePart updates the part status
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
cp.Parts[part.PartNumber-1].Part = part
cp.Parts[part.PartNumber-1].IsCompleted = true
}
// todoParts returns unfinished parts
func (cp *uploadCheckpoint) todoParts() []FileChunk {
fcs := []FileChunk{}
for _, part := range cp.Parts {
if !part.IsCompleted {
fcs = append(fcs, part.Chunk)
}
}
return fcs
}
// allParts returns all parts
func (cp *uploadCheckpoint) allParts() []UploadPart {
ps := []UploadPart{}
for _, part := range cp.Parts {
ps = append(ps, part.Part)
}
return ps
}
// getCompletedBytes returns completed bytes count
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
var completedBytes int64
for _, part := range cp.Parts {
if part.IsCompleted {
completedBytes += part.Chunk.Size
}
}
return completedBytes
}
// calcFileMD5 calculates the MD5 for the specified local file.
// It is intentionally unimplemented and returns an empty string, so the
// checkpoint's file-MD5 comparison is effectively a no-op.
func calcFileMD5(filePath string) (string, error) {
return "", nil
}
// prepare initializes the multipart upload
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
// CP
cp.Magic = uploadCpMagic
cp.FilePath = filePath
cp.ObjectKey = objectKey
// Local file
fd, err := os.Open(filePath)
if err != nil {
return err
}
defer fd.Close()
st, err := fd.Stat()
if err != nil {
return err
}
cp.FileStat.Size = st.Size()
cp.FileStat.LastModified = st.ModTime()
md, err := calcFileMD5(filePath)
if err != nil {
return err
}
cp.FileStat.MD5 = md
// Chunks
parts, err := SplitFileByPartSize(filePath, partSize)
if err != nil {
return err
}
cp.Parts = make([]cpPart, len(parts))
for i, part := range parts {
cp.Parts[i].Chunk = part
cp.Parts[i].IsCompleted = false
}
// Initiate the multipart upload
imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
if err != nil {
return err
}
cp.UploadID = imur.UploadID
return nil
}
// complete completes the multipart upload and deletes the local CP files
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
Key: cp.ObjectKey, UploadID: cp.UploadID}
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
if err != nil {
return err
}
os.Remove(cpFilePath)
return err
}
// uploadFileWithCp handles concurrent upload with checkpoint
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
listener := GetProgressListener(options)
partOptions := ChoiceTransferPartOption(options)
completeOptions := ChoiceCompletePartOption(options)
// Load CP data
ucp := uploadCheckpoint{}
err := ucp.load(cpFilePath)
if err != nil {
os.Remove(cpFilePath)
}
// If loading failed or the CP data is invalid, reinitialize the upload.
valid, err := ucp.isValid(filePath)
if err != nil || !valid {
if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
return err
}
os.Remove(cpFilePath)
}
chunks := ucp.todoParts()
imur := InitiateMultipartUploadResult{
Bucket: bucket.BucketName,
Key: objectKey,
UploadID: ucp.UploadID}
jobs := make(chan FileChunk, len(chunks))
results := make(chan UploadPart, len(chunks))
failed := make(chan error)
die := make(chan bool)
completedBytes := ucp.getCompletedBytes()
// RwBytes is 0 here because per-read progress is reported through
// teeReader.Read(), not through this start event.
event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
// Start the workers
arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
for w := 1; w <= routines; w++ {
go worker(w, arg, jobs, results, failed, die)
}
// Schedule jobs
go scheduler(jobs, chunks)
// Wait for the jobs to finish
completed := 0
for completed < len(chunks) {
select {
case part := <-results:
completed++
ucp.updatePart(part)
ucp.dump(cpFilePath)
completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
return err
}
if completed >= len(chunks) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
publishProgress(listener, event)
// Complete the multipart upload
err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
return err
}
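// exampleResumableUpload is an illustrative sketch (not part of the original
// file): with a checkpoint file configured, UploadFile routes through
// uploadFileWithCp above, so re-running it after an interruption resumes
// from the parts recorded in the .cp file instead of starting over.
// Checkpoint is assumed to be this package's constructor for the
// checkpointConfig option read by getCpConfig.
func exampleResumableUpload(bucket Bucket) error {
return bucket.UploadFile("object-key", "/tmp/local.file", 1024*1024,
Checkpoint(true, "/tmp/local.file.cp"))
}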

@ -1,539 +0,0 @@
package oss
import (
"bytes"
"errors"
"fmt"
"hash/crc32"
"hash/crc64"
"io"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"time"
)
var sys_name string
var sys_release string
var sys_machine string
func init() {
sys_name = runtime.GOOS
sys_release = "-"
sys_machine = runtime.GOARCH
if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
sys_name = string(bytes.TrimSpace(out))
}
if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
sys_release = string(bytes.TrimSpace(out))
}
if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
sys_machine = string(bytes.TrimSpace(out))
}
}
// userAgent gets the user agent.
// It has the SDK version information, OS information and Go version.
func userAgent() string {
sys := getSysInfo()
return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
sys.release, sys.machine, runtime.Version())
}
type sysInfo struct {
name string // OS name such as windows/Linux
release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
machine string // CPU type amd64/x86_64
}
// getSysInfo gets the OS information and CPU type
func getSysInfo() sysInfo {
return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
}
// GetRangeConfig gets the download range from the options.
func GetRangeConfig(options []Option) (*UnpackedRange, error) {
rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
if err != nil || rangeOpt == nil {
return nil, err
}
return ParseRange(rangeOpt.(string))
}
// UnpackedRange is an unpacked HTTP byte range.
type UnpackedRange struct {
HasStart bool // Flag indicates if the start point is specified
HasEnd bool // Flag indicates if the end point is specified
Start int64 // Start point
End int64 // End point
}
// InvalidRangeError returns an invalid range error
func InvalidRangeError(r string) error {
return fmt.Errorf("InvalidRange %s", r)
}
func GetRangeString(unpackRange UnpackedRange) string {
var strRange string
if unpackRange.HasStart && unpackRange.HasEnd {
strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
} else if unpackRange.HasStart {
strRange = fmt.Sprintf("%d-", unpackRange.Start)
} else if unpackRange.HasEnd {
strRange = fmt.Sprintf("-%d", unpackRange.End)
}
return strRange
}
// ParseRange parses the various styles of range such as bytes=M-N
func ParseRange(normalizedRange string) (*UnpackedRange, error) {
var err error
hasStart := false
hasEnd := false
var start int64
var end int64
// Expect the form bytes=M-N
nrSlice := strings.Split(normalizedRange, "=")
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
return nil, InvalidRangeError(normalizedRange)
}
// Bytes=M-N,X-Y
rSlice := strings.Split(nrSlice[1], ",")
rStr := rSlice[0]
if strings.HasSuffix(rStr, "-") { // M-
startStr := rStr[:len(rStr)-1]
start, err = strconv.ParseInt(startStr, 10, 64)
if err != nil {
return nil, InvalidRangeError(normalizedRange)
}
hasStart = true
} else if strings.HasPrefix(rStr, "-") { // -N
endStr := rStr[1:]
end, err = strconv.ParseInt(endStr, 10, 64)
if err != nil {
return nil, InvalidRangeError(normalizedRange)
}
if end == 0 { // -0
return nil, InvalidRangeError(normalizedRange)
}
hasEnd = true
} else { // M-N
valSlice := strings.Split(rStr, "-")
if len(valSlice) != 2 {
return nil, InvalidRangeError(normalizedRange)
}
start, err = strconv.ParseInt(valSlice[0], 10, 64)
if err != nil {
return nil, InvalidRangeError(normalizedRange)
}
hasStart = true
end, err = strconv.ParseInt(valSlice[1], 10, 64)
if err != nil {
return nil, InvalidRangeError(normalizedRange)
}
hasEnd = true
}
return &UnpackedRange{hasStart, hasEnd, start, end}, nil
}
// AdjustRange adjusts the range according to the length of the file and returns the effective start and end offsets
func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
if ur == nil {
return 0, size
}
if ur.HasStart && ur.HasEnd {
start = ur.Start
end = ur.End + 1
if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
start = 0
end = size
}
} else if ur.HasStart {
start = ur.Start
end = size
if ur.Start < 0 || ur.Start >= size {
start = 0
}
} else if ur.HasEnd {
start = size - ur.End
end = size
if ur.End < 0 || ur.End > size {
start = 0
end = size
}
}
return
}
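// exampleRange is an illustrative sketch (not part of the original file):
// ParseRange accepts the three range forms handled above, and AdjustRange
// clamps the parsed range to the actual object size.
func exampleRange() {
ur, err := ParseRange("bytes=500-999")
if err != nil {
return
}
start, end := AdjustRange(ur, 800) // 999 exceeds the size, so the whole object [0, 800) is used
fmt.Println(start, end) // 0 800
}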
// GetNowSec returns the current Unix time, the number of seconds elapsed since January 1, 1970 UTC.
func GetNowSec() int64 {
return time.Now().Unix()
}
// GetNowNanoSec returns the current time as a Unix time, the number of
// nanoseconds elapsed since January 1, 1970 UTC. The result is undefined if
// the Unix time in nanoseconds cannot be represented by an int64.
func GetNowNanoSec() int64 {
return time.Now().UnixNano()
}
// GetNowGMT gets the current time in GMT format.
func GetNowGMT() string {
return time.Now().UTC().Format(http.TimeFormat)
}
// FileChunk is the file chunk definition
type FileChunk struct {
Number int // Chunk number
Offset int64 // Chunk offset
Size int64 // Chunk size.
}
// SplitFileByPartNum splits a big file into chunks by the specified number of parts.
// It returns the split result when error is nil.
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
if chunkNum <= 0 || chunkNum > 10000 {
return nil, errors.New("chunkNum invalid")
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return nil, err
}
if int64(chunkNum) > stat.Size() {
return nil, errors.New("oss: chunkNum invalid")
}
var chunks []FileChunk
var chunk = FileChunk{}
var chunkN = (int64)(chunkNum)
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * (stat.Size() / chunkN)
if i == chunkN-1 {
chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
} else {
chunk.Size = stat.Size() / chunkN
}
chunks = append(chunks, chunk)
}
return chunks, nil
}
// SplitFileByPartSize splits a big file into chunks of the given part size.
// It returns the FileChunks when error is nil.
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
if chunkSize <= 0 {
return nil, errors.New("chunkSize invalid")
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return nil, err
}
var chunkN = stat.Size() / chunkSize
if chunkN >= 10000 {
return nil, errors.New("Too many parts, please increase part size")
}
var chunks []FileChunk
var chunk = FileChunk{}
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * chunkSize
chunk.Size = chunkSize
chunks = append(chunks, chunk)
}
if stat.Size()%chunkSize > 0 {
chunk.Number = len(chunks) + 1
chunk.Offset = int64(len(chunks)) * chunkSize
chunk.Size = stat.Size() % chunkSize
chunks = append(chunks, chunk)
}
return chunks, nil
}
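// exampleSplit is an illustrative sketch (not part of the original file):
// splitting by part size yields numbered chunks, with any remainder emitted
// as one extra, smaller final chunk.
func exampleSplit() error {
chunks, err := SplitFileByPartSize("/tmp/local.file", 5*1024*1024)
if err != nil {
return err
}
for _, c := range chunks {
fmt.Printf("part %d: offset=%d size=%d\n", c.Number, c.Offset, c.Size)
}
return nil
}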
// GetPartEnd calculates the end position
func GetPartEnd(begin int64, total int64, per int64) int64 {
if begin+per > total {
return total - 1
}
return begin + per - 1
}
// CrcTable returns the CRC64 table constructed from the ECMA polynomial
var CrcTable = func() *crc64.Table {
return crc64.MakeTable(crc64.ECMA)
}
// crc32Table returns the CRC32 table constructed from the IEEE polynomial
var crc32Table = func() *crc32.Table {
return crc32.MakeTable(crc32.IEEE)
}
// ChoiceTransferPartOption selects the options supported by UploadPart and DownloadPart
func ChoiceTransferPartOption(options []Option) []Option {
var outOption []Option
listener, _ := FindOption(options, progressListener, nil)
if listener != nil {
outOption = append(outOption, Progress(listener.(ProgressListener)))
}
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
if payer != nil {
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
}
versionId, _ := FindOption(options, "versionId", nil)
if versionId != nil {
outOption = append(outOption, VersionId(versionId.(string)))
}
trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
if trafficLimit != nil {
speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
outOption = append(outOption, TrafficLimitHeader(speed))
}
respHeader, _ := FindOption(options, responseHeader, nil)
if respHeader != nil {
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
}
return outOption
}
// ChoiceCompletePartOption selects the options supported by CompleteMultipartUpload
func ChoiceCompletePartOption(options []Option) []Option {
var outOption []Option
listener, _ := FindOption(options, progressListener, nil)
if listener != nil {
outOption = append(outOption, Progress(listener.(ProgressListener)))
}
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
if payer != nil {
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
}
acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
if acl != nil {
outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
}
callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
if callback != nil {
outOption = append(outOption, Callback(callback.(string)))
}
callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
if callbackVar != nil {
outOption = append(outOption, CallbackVar(callbackVar.(string)))
}
respHeader, _ := FindOption(options, responseHeader, nil)
if respHeader != nil {
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
}
forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
if forbidOverWrite != nil {
if forbidOverWrite.(string) == "true" {
outOption = append(outOption, ForbidOverWrite(true))
} else {
outOption = append(outOption, ForbidOverWrite(false))
}
}
notification, _ := FindOption(options, HttpHeaderOssNotification, nil)
if notification != nil {
outOption = append(outOption, SetHeader(HttpHeaderOssNotification, notification))
}
return outOption
}
// ChoiceAbortPartOption selects the options supported by AbortMultipartUpload
func ChoiceAbortPartOption(options []Option) []Option {
var outOption []Option
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
if payer != nil {
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
}
respHeader, _ := FindOption(options, responseHeader, nil)
if respHeader != nil {
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
}
return outOption
}
// ChoiceHeadObjectOption selects the options supported by HeadObject
func ChoiceHeadObjectOption(options []Option) []Option {
var outOption []Option
// HTTPHeaderRange is deliberately not selected, so the whole object length is returned
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
if payer != nil {
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
}
versionId, _ := FindOption(options, "versionId", nil)
if versionId != nil {
outOption = append(outOption, VersionId(versionId.(string)))
}
respHeader, _ := FindOption(options, responseHeader, nil)
if respHeader != nil {
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
}
return outOption
}
// CheckBucketName validates the bucket name: 3-63 characters of lowercase
// letters, digits, and '-', not beginning or ending with '-'.
func CheckBucketName(bucketName string) error {
nameLen := len(bucketName)
if nameLen < 3 || nameLen > 63 {
return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, nameLen)
}
for _, v := range bucketName {
if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
}
}
if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
}
return nil
}
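// exampleCheckBucketName is an illustrative sketch (not part of the original
// file) of the validation rules above.
func exampleCheckBucketName() {
fmt.Println(CheckBucketName("my-bucket")) // <nil>
fmt.Println(CheckBucketName("-bad-name")) // error: must start and end with a lowercase letter or number
fmt.Println(CheckBucketName("ab")) // error: length must be between 3 and 63
}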
// GetReaderLen returns the reader's content length when it can be determined
func GetReaderLen(reader io.Reader) (int64, error) {
var contentLength int64
var err error
switch v := reader.(type) {
case *bytes.Buffer:
contentLength = int64(v.Len())
case *bytes.Reader:
contentLength = int64(v.Len())
case *strings.Reader:
contentLength = int64(v.Len())
case *os.File:
fInfo, fError := v.Stat()
if fError != nil {
err = fmt.Errorf("can't get reader content length,%s", fError.Error())
} else {
contentLength = fInfo.Size()
}
case *io.LimitedReader:
contentLength = int64(v.N)
case *LimitedReadCloser:
contentLength = int64(v.N)
default:
err = fmt.Errorf("can't get reader content length,unkown reader type")
}
return contentLength, err
}
// LimitReadCloser returns a Reader that reads from r, stops after n bytes, and supports Close
func LimitReadCloser(r io.Reader, n int64) io.Reader {
var lc LimitedReadCloser
lc.R = r
lc.N = n
return &lc
}
// LimitedReadCloser is a LimitedReader that also supports Close()
type LimitedReadCloser struct {
io.LimitedReader
}
func (lc *LimitedReadCloser) Close() error {
if closer, ok := lc.R.(io.ReadCloser); ok {
return closer.Close()
}
return nil
}
// DiscardReadCloser discards the first Discard bytes read from the wrapped ReadCloser
type DiscardReadCloser struct {
RC io.ReadCloser
Discard int
}
func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
n, err := drc.RC.Read(b)
if drc.Discard == 0 || n <= 0 {
return n, err
}
if n <= drc.Discard {
drc.Discard -= n
return 0, err
}
realLen := n - drc.Discard
copy(b[0:realLen], b[drc.Discard:n])
drc.Discard = 0
return realLen, err
}
func (drc *DiscardReadCloser) Close() error {
closer, ok := drc.RC.(io.ReadCloser)
if ok {
return closer.Close()
}
return nil
}
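// exampleDiscard is an illustrative sketch (not part of the original file):
// DiscardReadCloser drops the first Discard bytes read from the wrapped
// ReadCloser and then passes the rest through unchanged.
func exampleDiscard() {
drc := &DiscardReadCloser{RC: io.NopCloser(strings.NewReader("0123456789")), Discard: 4}
out, _ := io.ReadAll(drc)
fmt.Println(string(out)) // "456789"
}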
// ConvertEmptyValueToNil converts empty-string values for the given keys to nil
func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
for _, key := range keys {
value, ok := params[key]
if ok && value == "" {
// convert "" to nil
params[key] = nil
}
}
}
// EscapeLFString escapes each newline in str as the two characters \n
func EscapeLFString(str string) string {
var log bytes.Buffer
for i := 0; i < len(str); i++ {
if str[i] != '\n' {
log.WriteByte(str[i])
} else {
log.WriteString("\\n")
}
}
return log.String()
}

@ -1,12 +0,0 @@
dist
/doc
/doc-staging
.yardoc
Gemfile.lock
/internal/awstesting/integration/smoke/**/importmarker__.go
/internal/awstesting/integration/smoke/_test/
/vendor
/private/model/cli/gen-api/gen-api
.gradle/
build/
.idea/

@ -1,27 +0,0 @@
[run]
concurrency = 4
timeout = "1m"
issues-exit-code = 0
modules-download-mode = "readonly"
allow-parallel-runners = true
skip-dirs = ["internal/repotools"]
skip-dirs-use-default = true
[output]
format = "github-actions"
[linters-settings.cyclop]
skip-tests = false
[linters-settings.errcheck]
check-blank = true
[linters]
disable-all = true
enable = ["errcheck"]
fast = false
[issues]
exclude-use-default = false
# Refer config definitions at https://golangci-lint.run/usage/configuration/#config-file

@ -1,31 +0,0 @@
language: go
sudo: true
dist: bionic
branches:
only:
- main
os:
- linux
- osx
# Travis doesn't work with windows and Go tip
#- windows
go:
- tip
matrix:
allow_failures:
- go: tip
before_install:
- if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi
- (cd /tmp/; go get golang.org/x/lint/golint)
env:
- EACHMODULE_CONCURRENCY=4
script:
- make ci-test-no-generate;

File diff suppressed because it is too large

@ -1,4 +0,0 @@
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.

@ -1,178 +0,0 @@
# Contributing to the AWS SDK for Go
Thank you for your interest in contributing to the AWS SDK for Go!
We work hard to provide a high-quality and useful SDK, and we greatly value
feedback and contributions from our community. Whether it's a bug report,
new feature, correction, or additional documentation, we welcome your issues
and pull requests. Please read through this document before submitting any
[issues] or [pull requests][pr] to ensure we have all the necessary information to
effectively respond to your bug report or contribution.
Jump To:
* [Bug Reports](#bug-reports)
* [Feature Requests](#feature-requests)
* [Code Contributions](#code-contributions)
## How to contribute
*Before you send us a pull request, please be sure that:*
1. You're working from the latest source on the master branch.
2. You check existing open, and recently closed, pull requests to be sure
that someone else hasn't already addressed the problem.
3. You create an issue before working on a contribution that will take a
significant amount of your time.
*Creating a Pull Request*
1. Fork the repository.
2. In your fork, make your change in a branch that's based on this repo's master branch.
3. Commit the change to your fork, using a clear and descriptive commit message.
4. Create a pull request, answering any questions in the pull request form.
For contributions that will take a significant amount of time, open a new
issue to pitch your idea before you get started. Explain the problem and
describe the content you want to see added to the documentation. Let us know
if you'll write it yourself or if you'd like us to help. We'll discuss your
proposal with you and let you know whether we're likely to accept it.
## Bug Reports
You can file bug reports against the SDK on the [GitHub issues][issues] page.
If you are filing a report for a bug or regression in the SDK, it's extremely
helpful to provide as much information as possible when opening the original
issue. This helps us reproduce and investigate the possible bug without having
to wait for this extra information to be provided. Please read the following
guidelines prior to filing a bug report.
1. Search through existing [issues][] to ensure that your specific issue has
not yet been reported. If it is a common issue, it is likely there is
already a bug report for your problem.
2. Ensure that you have tested the latest version of the SDK. Although you
may have an issue against an older version of the SDK, we cannot provide
bug fixes for old versions. It's also possible that the bug may have been
fixed in the latest release.
3. Provide as much information about your environment, SDK version, and
relevant dependencies as possible. For example, let us know what version
of Go you are using, which operating system and version, and the
environment your code is running in, e.g. a container.
4. Provide a minimal test case that reproduces your issue or any error
information related to your problem. We can provide feedback much
more quickly if we know what operations you are calling in the SDK. If
you cannot provide a full test case, provide as much code as you can
to help us diagnose the problem. Any relevant information should be provided
as well, like whether this is a persistent issue, or if it only occurs
some of the time.
## Feature Requests
Open an [issue][issues] with the following:
* A short, descriptive title. Ideally, other community members should be able
to get a good idea of the feature just from reading the title.
* A detailed description of the proposed feature.
* Why it should be added to the SDK.
* If possible, example code to illustrate how it should work.
* Use Markdown to make the request easier to read;
* If you intend to implement this feature, indicate that you'd like the issue to be assigned to you.
## Code Contributions
We are always happy to receive code and documentation contributions to the SDK.
Please be aware of the following notes prior to opening a pull request:
1. The SDK is released under the [Apache license][license]. Any code you submit
will be released under that license. For substantial contributions, we may
ask you to sign a [Contributor License Agreement (CLA)][cla].
2. If you would like to implement support for a significant feature that is not
yet available in the SDK, please talk to us beforehand to avoid any
duplication of effort.
3. Wherever possible, pull requests should contain tests as appropriate.
Bugfixes should contain tests that exercise the corrected behavior (i.e., the
test should fail without the bugfix and pass with it), and new features
should be accompanied by tests exercising the feature.
4. Pull requests that contain failing tests will not be merged until the test
failures are addressed. Pull requests that cause a significant drop in the
SDK's test coverage percentage are unlikely to be merged until tests have
been added.
5. The JSON files under the SDK's `models` folder are sourced from outside the SDK,
such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests
directly on these models. If you discover an issue with the models please
create a [GitHub issue][issues] describing the issue.
### Testing
To run the tests locally, run the `make unit` command. It will `go get` the
SDK's testing dependencies, and run vet, lint, and unit tests for the SDK.
```
make unit
```
Standard go testing functionality is supported as well. To test SDK code that
is tagged with `codegen` you'll need to set the build tag in the go test
command. The `make unit` command will do this automatically.
```
go test -tags codegen ./private/...
```
See the `Makefile` for additional testing tags that can be used in testing.
To test on multiple platforms, the SDK includes several Dockerfiles under the
`awstesting/sandbox` folder, and associated make recipes to execute
unit testing within environments configured for specific Go versions.
```
make sandbox-test-go18
```
To run all sandbox environments, use the following make recipes:
```
# Optionally update the Go tip that will be used during the batch testing
make update-aws-golang-tip
# Run all SDK tests for supported Go versions in sandboxes
make sandbox-test
```
In addition, the sandbox environments include make recipes for interactive modes,
so you can run commands within the Docker container in the context of the SDK.
```
make sandbox-go18
```
### Changelog Documents
You can see all release changes in the `CHANGELOG.md` file at the root of the
repository. The release notes added to this file will contain service client
updates, and major SDK changes. When submitting a pull request, please include an entry in `CHANGELOG_PENDING.md` under the appropriate changelog type (a sketch follows the list below) so your changelog entry is included in the following release.
#### Changelog Types
* `SDK Features` - For major additive features, internal changes that have
outward impact, or updates to the SDK foundations. This will result in a minor
version change.
* `SDK Enhancements` - For minor additive features or incremental changes.
This will result in a patch version change.
* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
patch version change.
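A hypothetical `CHANGELOG_PENDING.md` entry filed under one of these types might look like the following (the exact layout is defined by the repository's release tooling, so treat this only as a sketch):
```
### SDK Bugs
* Fix the request retry delay calculation to honor the configured minimum backoff.
```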
[issues]: https://github.com/aws/aws-sdk-go/issues
[pr]: https://github.com/aws/aws-sdk-go/pulls
[license]: http://aws.amazon.com/apache2.0/
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
[releasenotes]: https://github.com/aws/aws-sdk-go/releases

@ -1,15 +0,0 @@
Open Discussions
---
The following issues are currently open for community feedback.
All discourse must adhere to the [Code of Conduct] policy.
* [Refactoring API Client Paginators](https://github.com/aws/aws-sdk-go-v2/issues/439)
* [Refactoring API Client Waiters](https://github.com/aws/aws-sdk-go-v2/issues/442)
* [Refactoring API Client Enums and Types to Discrete Packages](https://github.com/aws/aws-sdk-go-v2/issues/445)
* [SDK Modularization](https://github.com/aws/aws-sdk-go-v2/issues/444)
Past Discussions
---
The issues listed here are for documentation purposes, and are used to capture issues and their associated discussions.
[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,519 +0,0 @@
# Lint rules to ignore
LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type'
LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID'
UNIT_TEST_TAGS=
BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest"
SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go
SDK_MIN_GO_VERSION ?= 1.15
EACHMODULE_FAILFAST ?= true
EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST}
EACHMODULE_CONCURRENCY ?= 1
EACHMODULE_CONCURRENCY_FLAG=-c ${EACHMODULE_CONCURRENCY}
EACHMODULE_SKIP ?=
EACHMODULE_SKIP_FLAG=-skip="${EACHMODULE_SKIP}"
EACHMODULE_FLAGS=${EACHMODULE_CONCURRENCY_FLAG} ${EACHMODULE_FAILFAST_FLAG} ${EACHMODULE_SKIP_FLAG}
# SDK's Core and client packages that are compatible with Go 1.9+.
SDK_CORE_PKGS=./aws/... ./internal/...
SDK_CLIENT_PKGS=./service/...
SDK_COMPA_PKGS=${SDK_CORE_PKGS} ${SDK_CLIENT_PKGS}
# SDK additional packages that are used for development of the SDK.
SDK_EXAMPLES_PKGS=
SDK_ALL_PKGS=${SDK_COMPA_PKGS} ${SDK_EXAMPLES_PKGS}
RUN_NONE=-run NONE
RUN_INTEG=-run '^TestInteg_'
CODEGEN_RESOURCES_PATH=$(shell pwd)/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen
CODEGEN_API_MODELS_PATH=$(shell pwd)/codegen/sdk-codegen/aws-models
ENDPOINTS_JSON=${CODEGEN_RESOURCES_PATH}/endpoints.json
ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json
LICENSE_FILE=$(shell pwd)/LICENSE.txt
SMITHY_GO_VERSION ?=
PRE_RELEASE_VERSION ?=
RELEASE_MANIFEST_FILE ?=
RELEASE_CHGLOG_DESC_FILE ?=
REPOTOOLS_VERSION ?= latest
REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools
REPOTOOLS_CMD_ANNOTATE_STABLE_GEN = ${REPOTOOLS_MODULE}/cmd/annotatestablegen@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_MAKE_RELATIVE = ${REPOTOOLS_MODULE}/cmd/makerelative@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledependency@${REPOTOOLS_VERSION}
REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false
REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE}
REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?=
ifneq ($(PRE_RELEASE_VERSION),)
REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
endif
.PHONY: all
all: generate unit
###################
# Code Generation #
###################
.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \
gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \
add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \
sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \
update-module-metadata download-modules-%
generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \
gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \
tidy-modules-. add-module-license-files gen-aws-ptrs format
smithy-generate:
cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean
smithy-build:
cd codegen && ./gradlew clean build -Plog-tests
smithy-build-%:
@# smithy-build- command that uses the pattern to define build filter that
@# the smithy API model service id starts with. Strips off the
@# "smithy-build-".
@#
@# e.g. smithy-build-com.amazonaws.rds
@# e.g. smithy-build-com.amazonaws.rds#AmazonRDSv19
cd codegen && \
SMITHY_GO_BUILD_API="$(subst smithy-build-,,$@)" ./gradlew clean build -Plog-tests
smithy-annotate-stable:
go run ${REPOTOOLS_CMD_ANNOTATE_STABLE_GEN}
smithy-clean:
cd codegen && ./gradlew clean
smithy-go-publish-local:
rm -rf /tmp/smithy-go-local
git clone https://github.com/aws/smithy-go /tmp/smithy-go-local
make -C /tmp/smithy-go-local smithy-clean smithy-publish-local
format:
gofmt -w -s .
gen-config-asserts:
@echo "Generating SDK config package implementor assertions"
cd config \
&& go mod tidy \
&& go generate
gen-internal-codegen:
@echo "Generating internal/codegen"
cd internal/codegen \
&& go generate
gen-repo-mod-replace:
@echo "Generating go.mod replace for repo modules"
go run ${REPOTOOLS_CMD_MAKE_RELATIVE}
gen-mod-replace-smithy-%:
@# gen-mod-replace-smithy- command that uses the pattern to define a filter for
@# the modules to add the replace directive to. Strips off the "gen-mod-replace-smithy-".
@#
@# SMITHY_GO_SRC environment variable is the path to add replace to
@#
@# e.g. gen-mod-replace-smithy-service_ssooidc
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
"go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}"
gen-mod-dropreplace-smithy-%:
@# gen-mod-dropreplace-smithy- command that uses the pattern to define a filter for
@# the modules to drop the replace directive from. Strips off the "gen-mod-dropreplace-smithy-".
@#
@# e.g. gen-mod-dropreplace-smithy-service_ssooidc
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
"go mod edit -dropreplace github.com/aws/smithy-go"
gen-aws-ptrs:
cd aws && go generate
tidy-modules-%:
@# tidy command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "tidy-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. tidy-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst tidy-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go mod tidy"
download-modules-%:
@# download command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "download-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. download-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst download-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go mod download all"
add-module-license-files:
cd internal/repotools/cmd/eachmodule && \
go run . -skip-root \
"cp $(LICENSE_FILE) ."
sync-models: sync-endpoints-model sync-api-models
sync-endpoints-model: sync-endpoints.json
sync-endpoints.json:
[[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty"
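# Example (illustrative): make sync-endpoints.json ENDPOINTS_MODEL=/path/to/endpoints.json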
clone-v1-models:
rm -rf /tmp/aws-sdk-go-model-sync
git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync
sync-api-models:
cd internal/repotools/cmd/syncAPIModels && \
go run . \
-m ${API_MODELS} \
-o ${CODEGEN_API_MODELS_PATH}
copy-attributevalue-feature:
cd ./feature/dynamodbstreams/attributevalue && \
find . -name "*.go" | grep -v "doc.go" | xargs -I % rm % && \
find ../../dynamodb/attributevalue -name "*.go" | grep -v "doc.go" | xargs -I % cp % . && \
ls *.go | grep -v "convert.go" | grep -v "doc.go" | \
xargs -I % sed -i.bk -E 's:github.com/aws/aws-sdk-go-v2/(service|feature)/dynamodb:github.com/aws/aws-sdk-go-v2/\1/dynamodbstreams:g' % && \
ls *.go | grep -v "convert.go" | grep -v "doc.go" | \
xargs -I % sed -i.bk 's:DynamoDB:DynamoDBStreams:g' % && \
ls *.go | grep -v "doc.go" | \
xargs -I % sed -i.bk 's:dynamodb\.:dynamodbstreams.:g' % && \
sed -i.bk 's:streams\.:ddbtypes.:g' "convert.go" && \
sed -i.bk 's:ddb\.:streams.:g' "convert.go" && \
sed -i.bk 's:ddbtypes\.:ddb.:g' "convert.go" &&\
sed -i.bk 's:Streams::g' "convert.go" && \
rm -rf ./*.bk && \
go mod tidy && \
gofmt -w -s . && \
go test .
min-go-version-%:
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst min-go-version-,,$@)) ${EACHMODULE_FLAGS} \
"go mod edit -go=${SDK_MIN_GO_VERSION}"
update-requires:
go run ${REPOTOOLS_CMD_UPDATE_REQUIRES}
update-module-metadata:
go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA}
################
# Unit Testing #
################
.PHONY: unit unit-race unit-test unit-race-test unit-race-modules-% unit-modules-% build build-modules-% \
go-build-modules-% test test-race-modules-% test-modules-% cachedep cachedep-modules-% api-diff-modules-%
unit: lint unit-modules-.
unit-race: lint unit-race-modules-.
unit-test: test-modules-.
unit-race-test: test-race-modules-.
unit-race-modules-%:
@# unit command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "unit-race-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. unit-race-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst unit-race-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go vet ${BUILD_TAGS} --all ./..." \
"go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
"go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
unit-modules-%:
@# unit command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "unit-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. unit-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst unit-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go vet ${BUILD_TAGS} --all ./..." \
"go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
"go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
build: build-modules-.
build-modules-%:
@# build command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "build-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. build-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst build-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test ${BUILD_TAGS} ${RUN_NONE} ./..."
go-build-modules-%:
@# build command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "build-modules-" and
@# replaces all "_" with "/".
@#
@# Validates that all modules in the repo have buildable Go files.
@#
@# e.g. go-build-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst go-build-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go build ${BUILD_TAGS} ./..."
test: test-modules-.
test-race-modules-%:
@# Test command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "test-race-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. test-race-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst test-race-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
test-modules-%:
@# Test command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "test-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. test-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst test-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
cachedep: cachedep-modules-.
cachedep-modules-%:
@# download command that uses the pattern to define the root path that the
@# module caching will start from. Strips off the "cachedep-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. cachedep-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst cachedep-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go mod download"
api-diff-modules-%:
@# Command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "api-diff-modules-" and
@# replaces all "_" with "/".
@#
@# Requires golang.org/x/exp/cmd/gorelease to be available in the GOPATH.
@#
@# e.g. api-diff-modules-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst api-diff-modules-,,$@)) \
-fail-fast=true \
-c 1 \
-skip="internal/repotools" \
"$$(go env GOPATH)/bin/gorelease"
##############
# CI Testing #
##############
.PHONY: ci-test ci-test-no-generate ci-test-generate-validate
ci-test: generate unit-race ci-test-generate-validate
ci-test-no-generate: unit-race
ci-test-generate-validate:
@echo "CI test validate no generated code changes"
git update-index --assume-unchanged go.mod go.sum
git add . -A
gitstatus=`git diff --cached --ignore-space-change`; \
echo "$$gitstatus"; \
if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi
git update-index --no-assume-unchanged go.mod go.sum
ci-lint: ci-lint-.
ci-lint-%:
@# Run golangci-lint command that uses the pattern to define the root path that the
@# module check will start from. Strips off the "ci-lint-" and
@# replaces all "_" with "/".
@#
@# e.g. ci-lint-internal_protocoltest
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst ci-lint-,,$@)) \
-fail-fast=false \
-c 1 \
-skip="internal/repotools" \
"golangci-lint run"
ci-lint-install:
@# Installs golangci-lint at GOPATH.
@# This should be used to run golangci-lint locally.
@#
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
#######################
# Integration Testing #
#######################
.PHONY: integration integ-modules-% cleanup-integ-buckets
integration: integ-modules-service
integ-modules-%:
@# integration command that uses the pattern to define the root path that
@# the module testing will start from. Strips off the "integ-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. integ-modules-service_dynamodb
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst integ-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=10m -tags "integration" -v ${RUN_INTEG} -count 1 ./..."
cleanup-integ-buckets:
@echo "Cleaning up SDK integration resources"
go run -tags "integration" ./internal/awstesting/cmd/bucket_cleanup/main.go "aws-sdk-go-integration"
##############
# Benchmarks #
##############
.PHONY: bench bench-modules-%
bench: bench-modules-.
bench-modules-%:
@# benchmark command that uses the pattern to define the root path that
@# the module testing will start from. Strips off the "bench-modules-" and
@# replaces all "_" with "/".
@#
@# e.g. bench-modules-service_dynamodb
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..."
#####################
# Release Process #
#####################
.PHONY: preview-release pre-release-validation release
ls-changes:
go run ${REPOTOOLS_CMD_CHANGELOG} ls
preview-release:
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
pre-release-validation:
@if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \
echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \
fi
@if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \
echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \
fi
release: pre-release-validation
go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
go run ${REPOTOOLS_CMD_CHANGELOG} rm -all
go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE}
##############
# Repo Tools #
##############
.PHONY: install-repotools
install-repotools:
go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
set-smithy-go-version:
@if [[ -z "${SMITHY_GO_VERSION}" ]]; then \
echo "SMITHY_GO_VERSION is required to update SDK's smithy-go module dependency version" && false; \
fi
go run ${REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY} -s "github.com/aws/smithy-go" -v "${SMITHY_GO_VERSION}"
##################
# Linting/Verify #
##################
.PHONY: verify lint vet vet-modules-% sdkv1check
verify: lint vet sdkv1check
lint:
@echo "go lint SDK and vendor packages"
@lint=`golint ./...`; \
dolint=`echo "$$lint" | grep -E -v \
-e ${LINT_IGNORE_S3MANAGER_INPUT} \
-e ${LINTIGNORESINGLEFIGHT}`; \
echo "$$dolint"; \
if [ "$$dolint" != "" ]; then exit 1; fi
vet: vet-modules-.
vet-modules-%:
cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst vet-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go vet ${BUILD_TAGS} --all ./..."
sdkv1check:
@echo "Checking for usage of AWS SDK for Go v1"
@sdkv1usage=`go list -test -f '''{{ if not .Standard }}{{ range $$_, $$name := .Imports }} * {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ range $$_, $$name := .TestImports }} *: {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ end}}''' ./... | sort -u | grep '''/aws-sdk-go/'''`; \
echo "$$sdkv1usage"; \
if [ "$$sdkv1usage" != "" ]; then exit 1; fi
list-deps: list-deps-.
list-deps-%:
@# command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "list-deps-" and
@# replaces all "_" with "/".
@#
@# Trim output to only include stdout for list of dependencies only.
@# make list-deps 2>&-
@#
@# e.g. list-deps-internal_protocoltest
@cd ./internal/repotools/cmd/eachmodule \
&& go run . -p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \
"go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u
###################
# Sandbox Testing #
###################
.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip
sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip
sandbox-build-%:
@# sandbox-build-go1.17
@# sandbox-build-gotip
docker build \
-f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" .
sandbox-run-%: sandbox-build-%
@# sandbox-run-go1.17
@# sandbox-run-gotip
docker run -i -t "aws-sdk-go-$(subst sandbox-run-,,$@)" bash
sandbox-test-%: sandbox-build-%
@# sandbox-test-go1.17
@# sandbox-test-gotip
docker run -t "aws-sdk-go-$(subst sandbox-test-,,$@)"
update-aws-golang-tip:
docker build --no-cache=true -f ./internal/awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" .

@ -1,3 +0,0 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

@ -1,157 +0,0 @@
# AWS SDK for Go v2
[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language.
The v2 SDK requires a minimum version of `Go 1.15`.
Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug
fixes, updates, and features added to the SDK.
Jump To:
* [Getting Started](#getting-started)
* [Getting Help](#getting-help)
* [Contributing](#feedback-and-contributing)
* [More Resources](#resources)
## Maintenance and support for SDK major versions
For information about maintenance and support for SDK major versions and their underlying dependencies, see the
following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html)
* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html)
## Getting started
To get started working with the SDK, set up your project for Go modules, and retrieve the SDK dependencies with `go get`.
This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client.
###### Initialize Project
```sh
$ mkdir ~/helloaws
$ cd ~/helloaws
$ go mod init helloaws
```
###### Add SDK Dependencies
```sh
$ go get github.com/aws/aws-sdk-go-v2/aws
$ go get github.com/aws/aws-sdk-go-v2/config
$ go get github.com/aws/aws-sdk-go-v2/service/dynamodb
```
###### Write Code
In your preferred editor add the following content to `main.go`
```go
package main
import (
"context"
"fmt"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)
func main() {
// Using the SDK's default configuration, loading additional config
// and credentials values from the environment variables, shared
// credentials, and shared configuration files
cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
if err != nil {
log.Fatalf("unable to load SDK config, %v", err)
}
// Using the Config value, create the DynamoDB client
svc := dynamodb.NewFromConfig(cfg)
// Build the request with its input parameters
resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
Limit: aws.Int32(5),
})
if err != nil {
log.Fatalf("failed to list tables, %v", err)
}
fmt.Println("Tables:")
for _, tableName := range resp.TableNames {
fmt.Println(tableName)
}
}
```
###### Compile and Execute
```sh
$ go run .
Tables:
tableOne
tableTwo
```
## Getting Help
Please use these community resources for getting help. We use the GitHub issues
for tracking bugs and feature requests.
* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag.
* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose).
This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/).
### Opening Issues
If you encounter a bug with the AWS SDK for Go we would like to hear about it.
Search the [existing issues][Issues] and see
if others are also experiencing the issue before opening a new issue. Please
include the version of AWS SDK for Go, Go language, and OS you're using. Please
also include a reproduction case when appropriate.
The GitHub issues are intended for bug reports and feature requests. For help
and questions with using AWS SDK for Go please make use of the resources listed
in the [Getting Help](#getting-help) section.
Keeping the list of open issues lean will help us respond in a timely manner.
## Feedback and contributing
The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
**Contributing**. You can open pull requests for fixes or additions to the AWS SDK for Go 2.0. All pull requests must be submitted under the Apache 2.0 license and will be reviewed by an SDK team member before being merged in. Accompanying unit tests, where possible, are appreciated.
## Resources
[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and
use the AWS SDK for Go V2.
[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go.
[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this
document to look up all API operation input and output parameters for AWS
services supported by the SDK. The API reference also includes documentation of
the SDK, and examples of how to use the SDK, service client API operations, and
required API operation parameters.
[Service Documentation](https://aws.amazon.com/documentation/) - Use this
documentation to learn how to interface with AWS services. These guides are
great for getting started with a service, or when looking for more
information about a service. While this documentation is not required for coding,
the service guides may also supply helpful samples worth looking at.
[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
[Issues] - Report issues, submit pull requests, and get involved
(see [Apache 2.0 License][license])
[Dep]: https://github.com/golang/dep
[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md
[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md
[license]: http://aws.amazon.com/apache2.0/

@ -1,92 +0,0 @@
// Package arn provides a parser for interacting with Amazon Resource Names.
package arn
import (
"errors"
"strings"
)
const (
arnDelimiter = ":"
arnSections = 6
arnPrefix = "arn:"
// zero-indexed
sectionPartition = 1
sectionService = 2
sectionRegion = 3
sectionAccountID = 4
sectionResource = 5
// errors
invalidPrefix = "arn: invalid prefix"
invalidSections = "arn: not enough sections"
)
// ARN captures the individual fields of an Amazon Resource Name.
// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
type ARN struct {
// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
// (Beijing) region is "aws-cn".
Partition string
// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
// namespaces, see
// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
Service string
// The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
// component might be omitted.
Region string
// The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
// ARNs for some resources don't require an account number, so this component might be omitted.
AccountID string
// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource —
// for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
// resource name itself. Some services allow paths for resource names, as described in
// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
Resource string
}
// Parse parses an ARN into its constituent parts.
//
// Some example ARNs:
// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
// arn:aws:iam::123456789012:user/David
// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
// arn:aws:s3:::my_corporate_bucket/exampleobject.png
func Parse(arn string) (ARN, error) {
if !strings.HasPrefix(arn, arnPrefix) {
return ARN{}, errors.New(invalidPrefix)
}
sections := strings.SplitN(arn, arnDelimiter, arnSections)
if len(sections) != arnSections {
return ARN{}, errors.New(invalidSections)
}
return ARN{
Partition: sections[sectionPartition],
Service: sections[sectionService],
Region: sections[sectionRegion],
AccountID: sections[sectionAccountID],
Resource: sections[sectionResource],
}, nil
}
// IsARN returns whether the given string is an ARN, by checking whether it
// starts with "arn:" and contains at least the minimum number of section delimiters.
func IsARN(arn string) bool {
return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
}
// String returns the canonical representation of the ARN
func (arn ARN) String() string {
return arnPrefix +
arn.Partition + arnDelimiter +
arn.Service + arnDelimiter +
arn.Region + arnDelimiter +
arn.AccountID + arnDelimiter +
arn.Resource
}
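// A minimal usage sketch: validate an ARN string with IsARN, parse it with
// Parse, and read the individual fields.
//
//	input := "arn:aws:iam::123456789012:user/David"
//	if arn.IsARN(input) {
//		v, err := arn.Parse(input)
//		if err == nil {
//			fmt.Println(v.Service)  // "iam"
//			fmt.Println(v.Resource) // "user/David"
//		}
//	}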

@ -1,179 +0,0 @@
package aws
import (
"net/http"
smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
// HTTPClient provides the interface to provide custom HTTPClients. Generally
// *http.Client is sufficient for most use cases. The HTTPClient should not
// follow 301 or 302 redirects.
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// A Config provides service configuration for service clients.
type Config struct {
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for
// information on AWS regions.
Region string
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials CredentialsProvider
// The Bearer Authentication token provider to use for authenticating API
// operation calls with a Bearer Authentication token. The API clients and
// operations must support the Bearer Authentication scheme in order for the
// token provider to be used. API clients created with NewFromConfig will
// automatically be configured with this option, if the API client supports
// Bearer Authentication.
//
// The SDK's config.LoadDefaultConfig can automatically populate this
// option for external configuration options such as SSO session.
// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
BearerAuthTokenProvider smithybearer.TokenProvider
// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
// The SDK defaults to a BuildableClient allowing API clients to create
// copies of the HTTP Client for service specific customizations.
//
// Use a (*http.Client) for custom behavior. Using a custom http.Client
// will prevent the SDK from modifying the HTTP client.
HTTPClient HTTPClient
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// See the `aws.EndpointResolver` documentation for additional usage
// information.
//
// Deprecated: See Config.EndpointResolverWithOptions
EndpointResolver EndpointResolver
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// When EndpointResolverWithOptions is specified, it will be used by a
// service client rather than using EndpointResolver if also specified.
//
// See the `aws.EndpointResolverWithOptions` documentation for additional
// usage information.
EndpointResolverWithOptions EndpointResolverWithOptions
// RetryMaxAttempts specifies the maximum number of attempts an API client
// will make when calling an operation that fails with a retryable error.
//
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is nil. This value will be ignored if
// Retryer is not nil.
RetryMaxAttempts int
// RetryMode specifies the retry model the API client will be created with.
//
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is nil. This value will be ignored if
// Retryer is not nil.
RetryMode RetryMode
// Retryer is a function that provides a Retryer implementation. A Retryer
// guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
//
// In general, the provider function should return a new instance of a
// Retryer if you are attempting to provide a consistent Retryer
// configuration across all clients. This will ensure that each client will
// be provided a new instance of the Retryer implementation, and will avoid
// issues such as sharing the same retry token bucket across services.
//
// If not nil, RetryMaxAttempts and RetryMode will be ignored by API
// clients.
Retryer func() Retryer
// ConfigSources are the sources that were used to construct the Config.
// Allows for additional configuration to be loaded by clients.
ConfigSources []interface{}
// APIOptions provides the set of middleware mutations that modify how the API
// client requests will be handled. This is useful for adding additional
// tracing data to a request, or changing behavior of the SDK's client.
APIOptions []func(*middleware.Stack) error
// The logger writer interface to write logging messages to. Defaults to
// standard error.
Logger logging.Logger
// Configures the events that will be sent to the configured logger. This
// can be used to configure the logging of signing, retries, request, and
// responses of the SDK clients.
//
// See the ClientLogMode type documentation for the complete set of logging
// modes and available configuration.
ClientLogMode ClientLogMode
// The configured DefaultsMode. If not specified, service clients will
// default to legacy.
//
// Supported modes are: auto, cross-region, in-region, legacy, mobile,
// standard
DefaultsMode DefaultsMode
// The RuntimeEnvironment configuration, only populated if the DefaultsMode
// is set to DefaultsModeAuto and is initialized by
// `config.LoadDefaultConfig`. You should not populate this structure
// programmatically, or rely on the values here within your applications.
RuntimeEnvironment RuntimeEnvironment
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
func NewConfig() *Config {
return &Config{}
}
// Copy will return a shallow copy of the Config object.
func (c Config) Copy() Config {
cp := c
return cp
}
// EndpointDiscoveryEnableState indicates if endpoint discovery is in an
// enabled, disabled, auto, or unset state.
//
// Default behavior (Auto or Unset) indicates operations that require endpoint
// discovery will use Endpoint Discovery by default. Operations that
// optionally use Endpoint Discovery will not use Endpoint Discovery
// unless EndpointDiscovery is explicitly enabled.
type EndpointDiscoveryEnableState uint
// Enumeration values for EndpointDiscoveryEnableState
const (
// EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
// Users do not need to use this value explicitly. The behavior for unset
// is the same as for EndpointDiscoveryAuto.
EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
// EndpointDiscoveryAuto represents an AUTO state that allows endpoint
// discovery only when required by the API. This is the default
// configuration resolved by the client if endpoint discovery is neither
// enabled nor disabled.
EndpointDiscoveryAuto // default state
// EndpointDiscoveryDisabled indicates the client MUST not perform endpoint
// discovery even when required.
EndpointDiscoveryDisabled
// EndpointDiscoveryEnabled indicates the client MUST always perform endpoint
// discovery if supported for the operation.
EndpointDiscoveryEnabled
)

@ -1,22 +0,0 @@
package aws
import (
"context"
"time"
)
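// suppressedContext wraps a context.Context so the wrapped context reports
// no deadline, no done channel, and no error, while still exposing the
// parent context's values through the embedded Context.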
type suppressedContext struct {
context.Context
}
func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
func (s *suppressedContext) Done() <-chan struct{} {
return nil
}
func (s *suppressedContext) Err() error {
return nil
}

@ -1,218 +0,0 @@
package aws
import (
"context"
"fmt"
"sync/atomic"
"time"
sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
)
// CredentialsCacheOptions are the options used to configure a CredentialsCache.
type CredentialsCacheOptions struct {
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired. This can cause an
// increased number of requests to refresh the credentials to occur.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// ExpiryWindowJitterFrac provides a mechanism for randomizing the
// expiration of credentials within the configured ExpiryWindow by a random
// percentage. Valid values are between 0.0 and 1.0.
//
// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
// is 0.5 then credentials will be set to expire between 30 and 60 seconds
// prior to their actual expiration time.
//
// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
ExpiryWindowJitterFrac float64
}
// CredentialsCache provides caching and concurrency safe credentials retrieval
// via the provider's retrieve method.
//
// CredentialsCache will look for optional interfaces on the Provider to adjust
// how the credential cache handles credentials caching.
//
// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
// credential refresh failures. This could return an updated Credentials
// value, or attempt another means of retrieving credentials.
//
// - AdjustExpiresByCredentialsCacheStrategy - Allows the provider to adjust
// how the credentials Expires value is modified. This could change how the
// Credentials Expires is adjusted based on the CredentialsCache ExpiryWindow
// option, such as providing a floor below which Expires will not be reduced.
type CredentialsCache struct {
provider CredentialsProvider
options CredentialsCacheOptions
creds atomic.Value
sf singleflight.Group
}
// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
// is expected to not be nil. A variadic list of one or more functions can be
// provided to modify the CredentialsCache configuration. This allows for
// configuration of credential expiry window and jitter.
func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
options := CredentialsCacheOptions{}
for _, fn := range optFns {
fn(&options)
}
if options.ExpiryWindow < 0 {
options.ExpiryWindow = 0
}
if options.ExpiryWindowJitterFrac < 0 {
options.ExpiryWindowJitterFrac = 0
} else if options.ExpiryWindowJitterFrac > 1 {
options.ExpiryWindowJitterFrac = 1
}
return &CredentialsCache{
provider: provider,
options: options,
}
}
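// A minimal usage sketch, assuming some concrete provider value: wrap it in
// a CredentialsCache that starts refreshing up to five minutes before expiry,
// with randomized jitter.
//
//	var provider aws.CredentialsProvider // e.g. an STS or IMDS based provider
//	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
//		o.ExpiryWindow = 5 * time.Minute
//		o.ExpiryWindowJitterFrac = 0.5
//	})
//	creds, err := cache.Retrieve(context.TODO())
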
// Retrieve returns the credentials. If the credentials have already been
// retrieved and have not expired, the cached credentials will be returned. If the
// credentials have not been retrieved yet, or have expired, the provider's Retrieve
// method will be called.
//
// Returns an error if the provider's retrieve method returns an error.
func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
if creds, ok := p.getCreds(); ok && !creds.Expired() {
return creds, nil
}
resCh := p.sf.DoChan("", func() (interface{}, error) {
return p.singleRetrieve(&suppressedContext{ctx})
})
select {
case res := <-resCh:
return res.Val.(Credentials), res.Err
case <-ctx.Done():
return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
}
}
func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
currCreds, ok := p.getCreds()
if ok && !currCreds.Expired() {
return currCreds, nil
}
newCreds, err := p.provider.Retrieve(ctx)
if err != nil {
handleFailToRefresh := defaultHandleFailToRefresh
if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
handleFailToRefresh = cs.HandleFailToRefresh
}
newCreds, err = handleFailToRefresh(ctx, currCreds, err)
if err != nil {
return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
}
}
if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
adjustExpiresBy := defaultAdjustExpiresBy
if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
adjustExpiresBy = cs.AdjustExpiresBy
}
randFloat64, err := sdkrand.CryptoRandFloat64()
if err != nil {
return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
}
var jitter time.Duration
if p.options.ExpiryWindowJitterFrac > 0 {
jitter = time.Duration(randFloat64 *
p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
}
newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
if err != nil {
return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
}
}
p.creds.Store(&newCreds)
return newCreds, nil
}
// getCreds returns the currently stored credentials and true. It returns
// false if no credentials are stored.
func (p *CredentialsCache) getCreds() (Credentials, bool) {
v := p.creds.Load()
if v == nil {
return Credentials{}, false
}
c := v.(*Credentials)
if c == nil || !c.HasKeys() {
return Credentials{}, false
}
return *c, true
}
// Invalidate will invalidate the cached credentials. The next call to Retrieve
// will cause the provider's Retrieve method to be called.
func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil))
}
// HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache that allows a CredentialsProvider to customize how a
// failed credentials refresh is handled.
type HandleFailRefreshCredentialsCacheStrategy interface {
// Given the previously cached Credentials, if any, and the refresh error,
// may return a new or modified set of Credentials, or an error.
//
// Credential caches may use default implementation if nil.
HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
}
// defaultHandleFailToRefresh returns the passed in error.
func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
return Credentials{}, err
}
// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache
// to allow CredentialsProvider to intercept adjustments to Credentials expiry
// based on expectations and use cases of CredentialsProvider.
//
// Credential caches may use default implementation if nil.
type AdjustExpiresByCredentialsCacheStrategy interface {
// Given a Credentials value as input, applies any mutations and returns the
// potentially updated Credentials, or an error.
AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
}
// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
// and returns the updated credentials value. If Credentials value's CanExpire
// is false, the passed in credentials are returned unchanged.
func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
if !creds.CanExpire {
return creds, nil
}
creds.Expires = creds.Expires.Add(dur)
return creds, nil
}
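// An illustrative sketch of the AdjustExpiresBy strategy mentioned above: a
// hypothetical provider type that floors the adjusted expiry so the
// ExpiryWindow never shortens the credential lifetime below a minimum time.
//
//	type floorProvider struct {
//		aws.CredentialsProvider
//		floor time.Time
//	}
//
//	func (p floorProvider) AdjustExpiresBy(c aws.Credentials, dur time.Duration) (aws.Credentials, error) {
//		adjusted := c.Expires.Add(dur)
//		if adjusted.Before(p.floor) {
//			adjusted = p.floor
//		}
//		c.Expires = adjusted
//		return c, nil
//	}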

@ -1,131 +0,0 @@
package aws
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
// AnonymousCredentials provides a sentinel CredentialsProvider that should be
// used to instruct the SDK's signing middleware to not sign the request.
//
// Using `nil` credentials when configuring an API client will achieve the same
// result. The AnonymousCredentials type allows you to configure the SDK's
// external config loading to not attempt to source credentials from the shared
// config or environment.
//
// For example you can use this CredentialsProvider with an API client's
// Options to instruct the client not to sign a request for accessing public
// S3 bucket objects.
//
// The following example demonstrates using the AnonymousCredentials to prevent
// the SDK's external config loading from attempting to resolve credentials.
//
// cfg, err := config.LoadDefaultConfig(context.TODO(),
// config.WithCredentialsProvider(aws.AnonymousCredentials{}),
// )
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(cfg)
//
// Alternatively you can leave the API client Options' `Credentials` member
// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
// the `Credentials` member to nil, if the external config resolved a
// credential provider.
//
// client := s3.New(s3.Options{
// // Credentials defaults to a nil value.
// })
//
// This can also be configured for specific operation calls.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(cfg)
//
// result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
// Bucket: aws.String("example-bucket"),
// Key: aws.String("example-key"),
// }, func(o *s3.Options) {
// o.Credentials = nil
// // Or
// o.Credentials = aws.AnonymousCredentials{}
// })
type AnonymousCredentials struct{}
// Retrieve implements the CredentialsProvider interface, but will always
// return error, and cannot be used to sign a request. The AnonymousCredentials
// type is used as a sentinel type instructing the AWS request signing
// middleware to not sign a request.
func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
return Credentials{Source: "AnonymousCredentials"},
fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
}
// A Credentials is the AWS credentials value for individual credential fields.
type Credentials struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Source of the credentials
Source string
// States if the credentials can expire or not.
CanExpire bool
// The time the credentials will expire at. Should be ignored if CanExpire
// is false.
Expires time.Time
}
// Expired returns if the credentials have expired.
func (v Credentials) Expired() bool {
if v.CanExpire {
// Calling Round(0) on the current time will truncate the monotonic
// reading only. Ensures credential expiry time is always based on
// reported wall-clock time.
return !v.Expires.After(sdk.NowTime().Round(0))
}
return false
}
// HasKeys returns if the credentials keys are set.
func (v Credentials) HasKeys() bool {
return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
}
// A CredentialsProvider is the interface for any component which will provide
// Credentials. A CredentialsProvider is required to manage its own
// Expired state, and what being expired means.
//
// A credentials provider implementation can be wrapped with a CredentialsCache
// to cache the credential value retrieved. Without the cache the SDK will
// attempt to retrieve the credentials for every request.
type CredentialsProvider interface {
// Retrieve returns nil if it successfully retrieved the value.
// An error is returned if the value was not obtainable, or was empty.
Retrieve(ctx context.Context) (Credentials, error)
}
// CredentialsProviderFunc provides a helper wrapping a function value to
// satisfy the CredentialsProvider interface.
type CredentialsProviderFunc func(context.Context) (Credentials, error)
// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
return fn(ctx)
}
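// A minimal usage sketch: adapt a plain function into a CredentialsProvider
// with CredentialsProviderFunc. The key values below are placeholders.
//
//	provider := aws.CredentialsProviderFunc(
//		func(ctx context.Context) (aws.Credentials, error) {
//			return aws.Credentials{
//				AccessKeyID:     "AKID",   // placeholder
//				SecretAccessKey: "SECRET", // placeholder
//				Source:          "ExampleFunc",
//			}, nil
//		})
//	creds, err := provider.Retrieve(context.TODO())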

@ -1,38 +0,0 @@
package defaults
import (
"github.com/aws/aws-sdk-go-v2/aws"
"runtime"
"strings"
)
var getGOOS = func() string {
return runtime.GOOS
}
// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
// is set to aws.DefaultsModeAuto.
func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
goos := getGOOS()
if goos == "android" || goos == "ios" {
return aws.DefaultsModeMobile
}
var currentRegion string
if len(environment.EnvironmentIdentifier) > 0 {
currentRegion = environment.Region
}
if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
currentRegion = environment.EC2InstanceMetadataRegion
}
if len(region) > 0 && len(currentRegion) > 0 {
if strings.EqualFold(region, currentRegion) {
return aws.DefaultsModeInRegion
}
return aws.DefaultsModeCrossRegion
}
return aws.DefaultsModeStandard
}
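// A minimal sketch of the resolution logic above: a client configured for
// us-west-2 running on an EC2 instance whose metadata reports us-west-2
// resolves to the in-region mode.
//
//	env := aws.RuntimeEnvironment{EC2InstanceMetadataRegion: "us-west-2"}
//	mode := defaults.ResolveDefaultsModeAuto("us-west-2", env)
//	// mode == aws.DefaultsModeInRegion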

@ -1,43 +0,0 @@
package defaults
import (
"time"
"github.com/aws/aws-sdk-go-v2/aws"
)
// Configuration is the set of SDK configuration options that are determined based
// on the configured DefaultsMode.
type Configuration struct {
// RetryMode is the configuration's default retry mode API clients should
// use for constructing a Retryer.
RetryMode aws.RetryMode
// ConnectTimeout is the maximum amount of time a dial will wait for
// a connect to complete.
//
// See https://pkg.go.dev/net#Dialer.Timeout
ConnectTimeout *time.Duration
// TLSNegotiationTimeout specifies the maximum amount of time to wait
// for a TLS handshake.
//
// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
TLSNegotiationTimeout *time.Duration
}
// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
if c.ConnectTimeout == nil {
return 0, false
}
return *c.ConnectTimeout, true
}
// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set.
func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
if c.TLSNegotiationTimeout == nil {
return 0, false
}
return *c.TLSNegotiationTimeout, true
}

@ -1,50 +0,0 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
package defaults
import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"time"
)
// GetModeConfiguration returns the default Configuration descriptor for the given mode.
//
// Supports the following modes: cross-region, in-region, mobile, standard
func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
var mv aws.DefaultsMode
mv.SetFromString(string(mode))
switch mv {
case aws.DefaultsModeCrossRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeInRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(1100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeMobile:
settings := Configuration{
ConnectTimeout: aws.Duration(30000 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeStandard:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
default:
return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
}
}
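// A minimal usage sketch: look up the defaults for in-region mode and read
// its connect timeout.
//
//	cfg, err := defaults.GetModeConfiguration(aws.DefaultsModeInRegion)
//	if err == nil {
//		if timeout, ok := cfg.GetConnectTimeout(); ok {
//			fmt.Println(timeout) // 1.1s
//		}
//	}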

@ -1,2 +0,0 @@
// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
package defaults

@ -1,95 +0,0 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
package aws
import (
"strings"
)
// DefaultsMode is the SDK defaults mode setting.
type DefaultsMode string
// The DefaultsMode constants.
const (
// DefaultsModeAuto is an experimental mode that builds on the standard mode.
// The SDK will attempt to discover the execution environment to determine the
// appropriate settings automatically.
//
// Note that the auto detection is heuristics-based and does not guarantee 100%
// accuracy. STANDARD mode will be used if the execution environment cannot
// be determined. The auto detection might query EC2 Instance Metadata service
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
// which might introduce latency. Therefore we recommend choosing an explicit
// defaults_mode instead if startup latency is critical to your application
DefaultsModeAuto DefaultsMode = "auto"
// DefaultsModeCrossRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services in a different region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeCrossRegion DefaultsMode = "cross-region"
// DefaultsModeInRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services from within the same AWS
// region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeInRegion DefaultsMode = "in-region"
// DefaultsModeLegacy provides default settings that vary per SDK and were used
// prior to establishment of defaults_mode
DefaultsModeLegacy DefaultsMode = "legacy"
// DefaultsModeMobile builds on the standard mode and includes optimization
// tailored for mobile applications
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeMobile DefaultsMode = "mobile"
// DefaultsModeStandard provides the latest recommended default values that
// should be safe to run in most scenarios
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeStandard DefaultsMode = "standard"
)
// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
// the provided string when compared using EqualFold. If the value does not match a known
// constant it will be set as-is and the function will return false. As a special case, if the
// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
func (d *DefaultsMode) SetFromString(v string) (ok bool) {
switch {
case strings.EqualFold(v, string(DefaultsModeAuto)):
*d = DefaultsModeAuto
ok = true
case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
*d = DefaultsModeCrossRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeInRegion)):
*d = DefaultsModeInRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeLegacy)):
*d = DefaultsModeLegacy
ok = true
case strings.EqualFold(v, string(DefaultsModeMobile)):
*d = DefaultsModeMobile
ok = true
case strings.EqualFold(v, string(DefaultsModeStandard)):
*d = DefaultsModeStandard
ok = true
case len(v) == 0:
*d = DefaultsModeLegacy
ok = true
default:
*d = DefaultsMode(v)
}
return ok
}
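// A minimal sketch of SetFromString's behavior: matching is
// case-insensitive, the empty string maps to the legacy mode, and unknown
// values are stored as-is with ok == false.
//
//	var m aws.DefaultsMode
//	ok := m.SetFromString("IN-REGION") // ok == true,  m == aws.DefaultsModeInRegion
//	ok = m.SetFromString("")           // ok == true,  m == aws.DefaultsModeLegacy
//	ok = m.SetFromString("custom")     // ok == false, m == aws.DefaultsMode("custom")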

@ -1,62 +0,0 @@
// Package aws provides the core SDK's utilities and shared types. Use this package's
// utilities to simplify setting and reading API operation parameters.
//
// # Value and Pointer Conversion Utilities
//
// This package includes a helper conversion utility for each scalar type the SDK's
// APIs use. These utilities make it easier to get a pointer to a scalar
// value, and to dereference a pointer.
//
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
// The Pointer to value will safely dereference the pointer and return its value.
// If the pointer was nil, the scalar's zero value will be returned.
//
// The value to pointer functions are named after the scalar type. So to get
// a *string from a string value, use the "String" function. This makes it
// easy to get a pointer to a literal string value, because getting the address of a
// literal requires assigning the value to a variable first.
//
// var strPtr *string
//
// // Without the SDK's conversion functions
// str := "my string"
// strPtr = &str
//
// // With the SDK's conversion functions
// strPtr = aws.String("my string")
//
// // Convert *string to string value
// str = aws.ToString(strPtr)
//
// In addition to scalars the aws package also includes conversion utilities for
// maps and slices of the types commonly used in API parameters. The map and slice
// conversion functions use a naming pattern similar to the scalar conversion
// functions.
//
// var strPtrs []*string
// var strs []string = []string{"Go", "Gophers", "Go"}
//
// // Convert []string to []*string
// strPtrs = aws.StringSlice(strs)
//
// // Convert []*string to []string
// strs = aws.ToStringSlice(strPtrs)
//
// # SDK Default HTTP Client
//
// The SDK will use the http.DefaultClient if an HTTP client is not provided to
// the SDK's Session, or service client constructor. This means that if the
// http.DefaultClient is modified by other components of your application the
// modifications will be picked up by the SDK as well.
//
// In some cases this might be intended, but it is a better practice to create
// a custom HTTP Client to share explicitly through your application. You can
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
// value of the SDK's Config type when creating a Session or service client.
package aws
// generate.go uses a build tag of "ignore", go run doesn't need to specify
// this because go run ignores all build flags when running a go file directly.
//go:generate go run -tags codegen generate.go
//go:generate go run -tags codegen logging_generate.go
//go:generate gofmt -w -s .

@ -1,229 +0,0 @@
package aws
import (
"fmt"
)
// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
type DualStackEndpointState uint
const (
// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
DualStackEndpointStateUnset DualStackEndpointState = iota
// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
DualStackEndpointStateEnabled
// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
DualStackEndpointStateDisabled
)
// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
type iface interface {
GetUseDualStackEndpoint() DualStackEndpointState
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetUseDualStackEndpoint()
found = true
break
}
}
return value, found
}
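// A minimal sketch of the probing pattern: any options value exposing a
// GetUseDualStackEndpoint method can be passed in. The opts type below is
// hypothetical.
//
//	type opts struct{ state aws.DualStackEndpointState }
//
//	func (o opts) GetUseDualStackEndpoint() aws.DualStackEndpointState { return o.state }
//
//	state, found := aws.GetUseDualStackEndpoint(opts{aws.DualStackEndpointStateEnabled})
//	// found == true, state == aws.DualStackEndpointStateEnabled
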
// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
type FIPSEndpointState uint
const (
// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
FIPSEndpointStateUnset FIPSEndpointState = iota
// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
FIPSEndpointStateEnabled
// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
FIPSEndpointStateDisabled
)
// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
type iface interface {
GetUseFIPSEndpoint() FIPSEndpointState
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetUseFIPSEndpoint()
found = true
break
}
}
return value, found
}
// Endpoint represents the endpoint a service client should make API operation
// calls to.
//
// The SDK will automatically resolve these endpoints per API client using
// internal endpoint resolvers. If you'd like to provide custom endpoint
// resolving behavior you can implement the EndpointResolver interface.
type Endpoint struct {
// The base URL endpoint the SDK API clients will use to make API calls to.
// The SDK will suffix URI path and query elements to this endpoint.
URL string
// Specifies if the endpoint's hostname can be modified by the SDK's API
// client.
//
// If the hostname is mutable the SDK API clients may modify any part of
// the hostname based on the requirements of the API (e.g. adding or
// removing content in the hostname), such as the Amazon S3 API client
// prefixing "bucketname" to the hostname, or changing the
// hostname's service name component from "s3." to "s3-accesspoint.dualstack."
// for the dualstack endpoint of an S3 Accesspoint resource.
//
// Care should be taken when providing a custom endpoint for an API. If the
// endpoint hostname is mutable, and the client cannot modify the endpoint
// correctly, the operation call will most likely fail, or have undefined
// behavior.
//
// If hostname is immutable, the SDK API clients will not modify the
// hostname of the URL. This may cause the API client not to function
// correctly if the API requires the operation specific hostname values
// to be used by the client.
//
// This flag does not modify the API client's behavior if this endpoint
// will be used instead of Endpoint Discovery, or if the endpoint will be
// used to perform Endpoint Discovery. That behavior is configured via the
// API Client's Options.
HostnameImmutable bool
// The AWS partition the endpoint belongs to.
PartitionID string
// The service name that should be used for signing the requests to the
// endpoint.
SigningName string
// The region that should be used for signing the request to the endpoint.
SigningRegion string
// The signing method that should be used for signing the requests to the
// endpoint.
SigningMethod string
// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
// If source is not provided when providing a custom endpoint, the SDK may not
// perform required host mutations correctly. Source should be used along with
// HostnameImmutable property as per the usage requirement.
Source EndpointSource
}
// EndpointSource is the endpoint source type.
type EndpointSource int
const (
// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
EndpointSourceServiceMetadata EndpointSource = iota
// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
// user provides a custom endpoint to be used by the SDK.
EndpointSourceCustom
)
// EndpointNotFoundError is a sentinel error to indicate that the
// EndpointResolver implementation was unable to resolve an endpoint for the
// given service and region. Resolvers should use this to indicate that an API
// client should fall back and attempt to use its internal default resolver to
// resolve the endpoint.
type EndpointNotFoundError struct {
Err error
}
// Error is the error message.
func (e *EndpointNotFoundError) Error() string {
return fmt.Sprintf("endpoint not found, %v", e.Err)
}
// Unwrap returns the underlying error.
func (e *EndpointNotFoundError) Unwrap() error {
return e.Err
}
// EndpointResolver is an endpoint resolver that can be used to provide or
// override an endpoint for the given service and region. API clients will
// attempt to use the EndpointResolver first to resolve an endpoint if
// available. If the EndpointResolver returns an EndpointNotFoundError error,
// API clients will fall back to attempting to resolve the endpoint using its
// internal default endpoint resolver.
//
// Deprecated: See EndpointResolverWithOptions
type EndpointResolver interface {
ResolveEndpoint(service, region string) (Endpoint, error)
}
// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
//
// Deprecated: See EndpointResolverWithOptionsFunc
type EndpointResolverFunc func(service, region string) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
//
// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
return e(service, region)
}
// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
// API clients will fall back to attempting to resolve the endpoint using its
// internal default endpoint resolver.
type EndpointResolverWithOptions interface {
ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
}
// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
return e(service, region, options...)
}
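// A minimal usage sketch: a custom resolver that overrides one service and
// region, and returns EndpointNotFoundError so API clients fall back to the
// default resolution for everything else. The localhost URL is a placeholder,
// and the "S3" service identifier is assumed for illustration.
//
//	resolver := aws.EndpointResolverWithOptionsFunc(
//		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
//			// Compare against the target client's service identifier,
//			// e.g. s3.ServiceID (assumed here for illustration).
//			if service == "S3" && region == "us-west-2" {
//				return aws.Endpoint{
//					URL:               "http://localhost:9000", // placeholder
//					HostnameImmutable: true,
//					Source:            aws.EndpointSourceCustom,
//				}, nil
//			}
//			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
//		})
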
// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS value.
func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
type iface interface {
GetDisableHTTPS() bool
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetDisableHTTPS()
found = true
break
}
}
return value, found
}
// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion value.
func GetResolvedRegion(options ...interface{}) (value string, found bool) {
type iface interface {
GetResolvedRegion() string
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetResolvedRegion()
found = true
break
}
}
return value, found
}

@ -1,9 +0,0 @@
package aws
// MissingRegionError is an error that is returned if region configuration
// value was not found.
type MissingRegionError struct{}
func (*MissingRegionError) Error() string {
return "an AWS region is required, but was not found"
}

@ -1,365 +0,0 @@
// Code generated by aws/generate.go DO NOT EDIT.
package aws
import (
"github.com/aws/smithy-go/ptr"
"time"
)
// ToBool returns bool value dereferenced if the passed
// in pointer was not nil. Returns a bool zero value if the
// pointer was nil.
func ToBool(p *bool) (v bool) {
return ptr.ToBool(p)
}
// ToBoolSlice returns a slice of bool values, that are
// dereferenced if the passed in pointer was not nil. Returns a bool
// zero value if the pointer was nil.
func ToBoolSlice(vs []*bool) []bool {
return ptr.ToBoolSlice(vs)
}
// ToBoolMap returns a map of bool values, that are
// dereferenced if the passed in pointer was not nil. The bool
// zero value is used if the pointer was nil.
func ToBoolMap(vs map[string]*bool) map[string]bool {
return ptr.ToBoolMap(vs)
}
// ToByte returns byte value dereferenced if the passed
// in pointer was not nil. Returns a byte zero value if the
// pointer was nil.
func ToByte(p *byte) (v byte) {
return ptr.ToByte(p)
}
// ToByteSlice returns a slice of byte values, that are
// dereferenced if the passed in pointer was not nil. Returns a byte
// zero value if the pointer was nil.
func ToByteSlice(vs []*byte) []byte {
return ptr.ToByteSlice(vs)
}
// ToByteMap returns a map of byte values, that are
// dereferenced if the passed in pointer was not nil. The byte
// zero value is used if the pointer was nil.
func ToByteMap(vs map[string]*byte) map[string]byte {
return ptr.ToByteMap(vs)
}
// ToString returns string value dereferenced if the passed
// in pointer was not nil. Returns a string zero value if the
// pointer was nil.
func ToString(p *string) (v string) {
return ptr.ToString(p)
}
// ToStringSlice returns a slice of string values, that are
// dereferenced if the passed in pointer was not nil. Returns a string
// zero value if the pointer was nil.
func ToStringSlice(vs []*string) []string {
return ptr.ToStringSlice(vs)
}
// ToStringMap returns a map of string values, that are
// dereferenced if the passed in pointer was not nil. The string
// zero value is used if the pointer was nil.
func ToStringMap(vs map[string]*string) map[string]string {
return ptr.ToStringMap(vs)
}
// ToInt returns int value dereferenced if the passed
// in pointer was not nil. Returns an int zero value if the
// pointer was nil.
func ToInt(p *int) (v int) {
return ptr.ToInt(p)
}
// ToIntSlice returns a slice of int values, that are
// dereferenced if the passed in pointer was not nil. Returns an int
// zero value if the pointer was nil.
func ToIntSlice(vs []*int) []int {
return ptr.ToIntSlice(vs)
}
// ToIntMap returns a map of int values, that are
// dereferenced if the passed in pointer was not nil. The int
// zero value is used if the pointer was nil.
func ToIntMap(vs map[string]*int) map[string]int {
return ptr.ToIntMap(vs)
}
// ToInt8 returns int8 value dereferenced if the passed
// in pointer was not nil. Returns an int8 zero value if the
// pointer was nil.
func ToInt8(p *int8) (v int8) {
return ptr.ToInt8(p)
}
// ToInt8Slice returns a slice of int8 values, that are
// dereferenced if the passed in pointer was not nil. Returns an int8
// zero value if the pointer was nil.
func ToInt8Slice(vs []*int8) []int8 {
return ptr.ToInt8Slice(vs)
}
// ToInt8Map returns a map of int8 values, that are
// dereferenced if the passed in pointer was not nil. The int8
// zero value is used if the pointer was nil.
func ToInt8Map(vs map[string]*int8) map[string]int8 {
return ptr.ToInt8Map(vs)
}
// ToInt16 returns int16 value dereferenced if the passed
// in pointer was not nil. Returns an int16 zero value if the
// pointer was nil.
func ToInt16(p *int16) (v int16) {
return ptr.ToInt16(p)
}
// ToInt16Slice returns a slice of int16 values, that are
// dereferenced if the passed in pointer was not nil. Returns an int16
// zero value if the pointer was nil.
func ToInt16Slice(vs []*int16) []int16 {
return ptr.ToInt16Slice(vs)
}
// ToInt16Map returns a map of int16 values, that are
// dereferenced if the passed in pointer was not nil. The int16
// zero value is used if the pointer was nil.
func ToInt16Map(vs map[string]*int16) map[string]int16 {
return ptr.ToInt16Map(vs)
}
// ToInt32 returns int32 value dereferenced if the passed
// in pointer was not nil. Returns an int32 zero value if the
// pointer was nil.
func ToInt32(p *int32) (v int32) {
return ptr.ToInt32(p)
}
// ToInt32Slice returns a slice of int32 values, that are
// dereferenced if the passed in pointer was not nil. Returns an int32
// zero value if the pointer was nil.
func ToInt32Slice(vs []*int32) []int32 {
return ptr.ToInt32Slice(vs)
}
// ToInt32Map returns a map of int32 values, that are
// dereferenced if the passed in pointer was not nil. The int32
// zero value is used if the pointer was nil.
func ToInt32Map(vs map[string]*int32) map[string]int32 {
return ptr.ToInt32Map(vs)
}
// ToInt64 returns int64 value dereferenced if the passed
// in pointer was not nil. Returns an int64 zero value if the
// pointer was nil.
func ToInt64(p *int64) (v int64) {
return ptr.ToInt64(p)
}
// ToInt64Slice returns a slice of int64 values, that are
// dereferenced if the passed in pointer was not nil. Returns an int64
// zero value if the pointer was nil.
func ToInt64Slice(vs []*int64) []int64 {
return ptr.ToInt64Slice(vs)
}
// ToInt64Map returns a map of int64 values, that are
// dereferenced if the passed in pointer was not nil. The int64
// zero value is used if the pointer was nil.
func ToInt64Map(vs map[string]*int64) map[string]int64 {
return ptr.ToInt64Map(vs)
}
// ToUint returns uint value dereferenced if the passed
// in pointer was not nil. Returns a uint zero value if the
// pointer was nil.
func ToUint(p *uint) (v uint) {
return ptr.ToUint(p)
}
// ToUintSlice returns a slice of uint values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint
// zero value if the pointer was nil.
func ToUintSlice(vs []*uint) []uint {
return ptr.ToUintSlice(vs)
}
// ToUintMap returns a map of uint values, that are
// dereferenced if the passed in pointer was not nil. The uint
// zero value is used if the pointer was nil.
func ToUintMap(vs map[string]*uint) map[string]uint {
return ptr.ToUintMap(vs)
}
// ToUint8 returns uint8 value dereferenced if the passed
// in pointer was not nil. Returns a uint8 zero value if the
// pointer was nil.
func ToUint8(p *uint8) (v uint8) {
return ptr.ToUint8(p)
}
// ToUint8Slice returns a slice of uint8 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint8
// zero value if the pointer was nil.
func ToUint8Slice(vs []*uint8) []uint8 {
return ptr.ToUint8Slice(vs)
}
// ToUint8Map returns a map of uint8 values, that are
// dereferenced if the passed in pointer was not nil. The uint8
// zero value is used if the pointer was nil.
func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
return ptr.ToUint8Map(vs)
}
// ToUint16 returns uint16 value dereferenced if the passed
// in pointer was not nil. Returns a uint16 zero value if the
// pointer was nil.
func ToUint16(p *uint16) (v uint16) {
return ptr.ToUint16(p)
}
// ToUint16Slice returns a slice of uint16 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint16
// zero value if the pointer was nil.
func ToUint16Slice(vs []*uint16) []uint16 {
return ptr.ToUint16Slice(vs)
}
// ToUint16Map returns a map of uint16 values, that are
// dereferenced if the passed in pointer was not nil. The uint16
// zero value is used if the pointer was nil.
func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
return ptr.ToUint16Map(vs)
}
// ToUint32 returns uint32 value dereferenced if the passed
// in pointer was not nil. Returns a uint32 zero value if the
// pointer was nil.
func ToUint32(p *uint32) (v uint32) {
return ptr.ToUint32(p)
}
// ToUint32Slice returns a slice of uint32 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint32
// zero value if the pointer was nil.
func ToUint32Slice(vs []*uint32) []uint32 {
return ptr.ToUint32Slice(vs)
}
// ToUint32Map returns a map of uint32 values, that are
// dereferenced if the passed in pointer was not nil. The uint32
// zero value is used if the pointer was nil.
func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
return ptr.ToUint32Map(vs)
}
// ToUint64 returns uint64 value dereferenced if the passed
// in pointer was not nil. Returns a uint64 zero value if the
// pointer was nil.
func ToUint64(p *uint64) (v uint64) {
return ptr.ToUint64(p)
}
// ToUint64Slice returns a slice of uint64 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint64
// zero value if the pointer was nil.
func ToUint64Slice(vs []*uint64) []uint64 {
return ptr.ToUint64Slice(vs)
}
// ToUint64Map returns a map of uint64 values, that are
// dereferenced if the passed in pointer was not nil. The uint64
// zero value is used if the pointer was nil.
func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
return ptr.ToUint64Map(vs)
}
// ToFloat32 returns float32 value dereferenced if the passed
// in pointer was not nil. Returns a float32 zero value if the
// pointer was nil.
func ToFloat32(p *float32) (v float32) {
return ptr.ToFloat32(p)
}
// ToFloat32Slice returns a slice of float32 values, that are
// dereferenced if the passed in pointer was not nil. Returns a float32
// zero value if the pointer was nil.
func ToFloat32Slice(vs []*float32) []float32 {
return ptr.ToFloat32Slice(vs)
}
// ToFloat32Map returns a map of float32 values, that are
// dereferenced if the passed in pointer was not nil. The float32
// zero value is used if the pointer was nil.
func ToFloat32Map(vs map[string]*float32) map[string]float32 {
return ptr.ToFloat32Map(vs)
}
// ToFloat64 returns float64 value dereferenced if the passed
// in pointer was not nil. Returns a float64 zero value if the
// pointer was nil.
func ToFloat64(p *float64) (v float64) {
return ptr.ToFloat64(p)
}
// ToFloat64Slice returns a slice of float64 values, that are
// dereferenced if the passed in pointer was not nil. Returns a float64
// zero value if the pointer was nil.
func ToFloat64Slice(vs []*float64) []float64 {
return ptr.ToFloat64Slice(vs)
}
// ToFloat64Map returns a map of float64 values, that are
// dereferenced if the passed in pointer was not nil. The float64
// zero value is used if the pointer was nil.
func ToFloat64Map(vs map[string]*float64) map[string]float64 {
return ptr.ToFloat64Map(vs)
}
// ToTime returns time.Time value dereferenced if the passed
// in pointer was not nil. Returns a time.Time zero value if the
// pointer was nil.
func ToTime(p *time.Time) (v time.Time) {
return ptr.ToTime(p)
}
// ToTimeSlice returns a slice of time.Time values, that are
// dereferenced if the passed in pointer was not nil. Returns a time.Time
// zero value if the pointer was nil.
func ToTimeSlice(vs []*time.Time) []time.Time {
return ptr.ToTimeSlice(vs)
}
// ToTimeMap returns a map of time.Time values, that are
// dereferenced if the passed in pointer was not nil. The time.Time
// zero value is used if the pointer was nil.
func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
return ptr.ToTimeMap(vs)
}
// ToDuration returns time.Duration value dereferenced if the passed
// in pointer was not nil. Returns a time.Duration zero value if the
// pointer was nil.
func ToDuration(p *time.Duration) (v time.Duration) {
return ptr.ToDuration(p)
}
// ToDurationSlice returns a slice of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. Returns a time.Duration
// zero value if the pointer was nil.
func ToDurationSlice(vs []*time.Duration) []time.Duration {
return ptr.ToDurationSlice(vs)
}
// ToDurationMap returns a map of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. The time.Duration
// zero value is used if the pointer was nil.
func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
return ptr.ToDurationMap(vs)
}
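
Usage is symmetrical with the companion pointer constructors (aws.String, aws.Int32, and so on) defined alongside these helpers; a short sketch:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Round trip a value through its pointer constructor.
	name := aws.String("example-bucket")
	fmt.Println(aws.ToString(name)) // "example-bucket"

	// A nil pointer dereferences to the zero value instead of panicking.
	var when *time.Time
	fmt.Println(aws.ToTime(when).IsZero()) // true

	// Slices of pointers become slices of values.
	fmt.Println(aws.ToInt32Slice([]*int32{aws.Int32(1), aws.Int32(2)})) // [1 2]
}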

@ -1,6 +0,0 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.15"

@ -1,119 +0,0 @@
// Code generated by aws/logging_generate.go DO NOT EDIT.
package aws
// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
// each bit is a flag that describes the logging behavior for one or more client components.
// The entire 64-bit group is reserved for later expansion by the SDK.
//
// Example: Setting ClientLogMode to enable logging of retries and requests
//
// clientLogMode := aws.LogRetries | aws.LogRequest
//
// Example: Adding an additional log mode to an existing ClientLogMode value
//
// clientLogMode |= aws.LogResponse
type ClientLogMode uint64
// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
const (
LogSigning ClientLogMode = 1 << (64 - 1 - iota)
LogRetries
LogRequest
LogRequestWithBody
LogResponse
LogResponseWithBody
LogDeprecatedUsage
LogRequestEventMessage
LogResponseEventMessage
)
// IsSigning returns whether the Signing logging mode bit is set
func (m ClientLogMode) IsSigning() bool {
return m&LogSigning != 0
}
// IsRetries returns whether the Retries logging mode bit is set
func (m ClientLogMode) IsRetries() bool {
return m&LogRetries != 0
}
// IsRequest returns whether the Request logging mode bit is set
func (m ClientLogMode) IsRequest() bool {
return m&LogRequest != 0
}
// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set
func (m ClientLogMode) IsRequestWithBody() bool {
return m&LogRequestWithBody != 0
}
// IsResponse returns whether the Response logging mode bit is set
func (m ClientLogMode) IsResponse() bool {
return m&LogResponse != 0
}
// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set
func (m ClientLogMode) IsResponseWithBody() bool {
return m&LogResponseWithBody != 0
}
// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set
func (m ClientLogMode) IsDeprecatedUsage() bool {
return m&LogDeprecatedUsage != 0
}
// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set
func (m ClientLogMode) IsRequestEventMessage() bool {
return m&LogRequestEventMessage != 0
}
// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set
func (m ClientLogMode) IsResponseEventMessage() bool {
return m&LogResponseEventMessage != 0
}
// ClearSigning clears the Signing logging mode bit
func (m *ClientLogMode) ClearSigning() {
*m &^= LogSigning
}
// ClearRetries clears the Retries logging mode bit
func (m *ClientLogMode) ClearRetries() {
*m &^= LogRetries
}
// ClearRequest clears the Request logging mode bit
func (m *ClientLogMode) ClearRequest() {
*m &^= LogRequest
}
// ClearRequestWithBody clears the RequestWithBody logging mode bit
func (m *ClientLogMode) ClearRequestWithBody() {
*m &^= LogRequestWithBody
}
// ClearResponse clears the Response logging mode bit
func (m *ClientLogMode) ClearResponse() {
*m &^= LogResponse
}
// ClearResponseWithBody clears the ResponseWithBody logging mode bit
func (m *ClientLogMode) ClearResponseWithBody() {
*m &^= LogResponseWithBody
}
// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit
func (m *ClientLogMode) ClearDeprecatedUsage() {
*m &^= LogDeprecatedUsage
}
// ClearRequestEventMessage clears the RequestEventMessage logging mode bit
func (m *ClientLogMode) ClearRequestEventMessage() {
*m &^= LogRequestEventMessage
}
// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit
func (m *ClientLogMode) ClearResponseEventMessage() {
*m &^= LogResponseEventMessage
}
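
A minimal sketch of combining, testing, and clearing mode bits; assigning the result to aws.Config.ClientLogMode is how clients normally consume it:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Enable logging of retries and full request bodies.
	mode := aws.LogRetries | aws.LogRequestWithBody

	fmt.Println(mode.IsRetries())         // true
	fmt.Println(mode.IsRequestWithBody()) // true

	// Clearing one bit leaves the others untouched.
	mode.ClearRetries()
	fmt.Println(mode.IsRetries()) // false

	cfg := aws.Config{ClientLogMode: mode}
	_ = cfg
}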

@ -1,95 +0,0 @@
//go:build clientlogmode
// +build clientlogmode
package main
import (
"fmt"
"log"
"os"
"strings"
"text/template"
)
var config = struct {
ModeBits []string
}{
// Items should be appended only to keep bit-flag positions stable
ModeBits: []string{
"Signing",
"Retries",
"Request",
"RequestWithBody",
"Response",
"ResponseWithBody",
"DeprecatedUsage",
"RequestEventMessage",
"ResponseEventMessage",
},
}
func bitName(name string) string {
return strings.ToUpper(name[:1]) + name[1:]
}
var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{
"symbolName": func(name string) string {
return "Log" + bitName(name)
},
"bitName": bitName,
}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT.
package aws
// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
// each bit is a flag that describes the logging behavior for one or more client components.
// The entire 64-bit group is reserved for later expansion by the SDK.
//
// Example: Setting ClientLogMode to enable logging of retries and requests
// clientLogMode := aws.LogRetries | aws.LogRequest
//
// Example: Adding an additional log mode to an existing ClientLogMode value
// clientLogMode |= aws.LogResponse
type ClientLogMode uint64
// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
const (
{{- range $index, $field := .ModeBits }}
{{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }}
{{- end }}
)
{{ range $_, $field := .ModeBits }}
// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set
func (m ClientLogMode) Is{{- bitName $field }}() bool {
return m&{{- (symbolName $field) }} != 0
}
{{ end }}
{{- range $_, $field := .ModeBits }}
// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit
func (m *ClientLogMode) Clear{{- bitName $field }}() {
*m &^= {{ (symbolName $field) }}
}
{{ end -}}
`))
func main() {
uniqueBitFields := make(map[string]struct{})
for _, bitName := range config.ModeBits {
if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok {
panic(fmt.Sprintf("duplicate bit field: %s", bitName))
}
// Store the lowered key so the case-insensitive duplicate check above works.
uniqueBitFields[strings.ToLower(bitName)] = struct{}{}
}
file, err := os.Create("logging.go")
if err != nil {
log.Fatal(err)
}
defer file.Close()
err = tmpl.Execute(file, config)
if err != nil {
log.Fatal(err)
}
}

@ -1,180 +0,0 @@
package middleware
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go/middleware"
)
// RegisterServiceMetadata registers metadata about the service and operation into the middleware context
// so that it is available at runtime for other middleware to introspect.
type RegisterServiceMetadata struct {
ServiceID string
SigningName string
Region string
OperationName string
}
// ID returns the middleware identifier.
func (s *RegisterServiceMetadata) ID() string {
return "RegisterServiceMetadata"
}
// HandleInitialize registers service metadata information into the middleware context, allowing for introspection.
func (s RegisterServiceMetadata) HandleInitialize(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
if len(s.ServiceID) > 0 {
ctx = SetServiceID(ctx, s.ServiceID)
}
if len(s.SigningName) > 0 {
ctx = SetSigningName(ctx, s.SigningName)
}
if len(s.Region) > 0 {
ctx = setRegion(ctx, s.Region)
}
if len(s.OperationName) > 0 {
ctx = setOperationName(ctx, s.OperationName)
}
return next.HandleInitialize(ctx, in)
}
// Service metadata keys used to store and look up runtime stack information.
type (
serviceIDKey struct{}
signingNameKey struct{}
signingRegionKey struct{}
regionKey struct{}
operationNameKey struct{}
partitionIDKey struct{}
)
// GetServiceID retrieves the service id from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetServiceID(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
return v
}
// GetSigningName retrieves the service signing name from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetSigningName(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
return v
}
// GetSigningRegion retrieves the region from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetSigningRegion(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
return v
}
// GetRegion retrieves the endpoint region from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetRegion(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
return v
}
// GetOperationName retrieves the service operation metadata from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetOperationName(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
return v
}
// GetPartitionID retrieves the endpoint partition id from the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func GetPartitionID(ctx context.Context) string {
v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
return v
}
// SetSigningName sets or modifies the signing name on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func SetSigningName(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, signingNameKey{}, value)
}
// SetSigningRegion sets or modifies the region on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func SetSigningRegion(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, signingRegionKey{}, value)
}
// SetServiceID sets the service id on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func SetServiceID(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, serviceIDKey{}, value)
}
// setRegion sets the endpoint region on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setRegion(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, regionKey{}, value)
}
// setOperationName sets the service operation on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setOperationName(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, operationNameKey{}, value)
}
// SetPartitionID sets the partition id of a resolved region on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func SetPartitionID(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, partitionIDKey{}, value)
}
// EndpointSource key
type endpointSourceKey struct{}
// GetEndpointSource returns an endpoint source if set on context
func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
return v
}
// SetEndpointSource sets endpoint source on context
func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
}
type signingCredentialsKey struct{}
// GetSigningCredentials returns the credentials that were used for signing if set on context.
func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
return v
}
// SetSigningCredentials sets the credentials used for signing on the context.
func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context {
return middleware.WithStackValue(ctx, signingCredentialsKey{}, value)
}
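
A sketch of wiring this metadata into a smithy stack by hand; the service and operation names are illustrative, and generated service clients normally perform this registration themselves:

package main

import (
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	stack := middleware.NewStack("example", nil)
	err := stack.Initialize.Add(&awsmiddleware.RegisterServiceMetadata{
		ServiceID:     "ExampleService",
		SigningName:   "exampleservice",
		Region:        "us-west-2",
		OperationName: "ExampleOperation",
	}, middleware.After)
	if err != nil {
		log.Fatal(err)
	}
	// Later middleware can now call awsmiddleware.GetServiceID and friends
	// on the operation's context.
}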

@ -1,168 +0,0 @@
package middleware
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/internal/rand"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyrand "github.com/aws/smithy-go/rand"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// ClientRequestID is a Smithy BuildMiddleware that generates a unique ID for
// each logical API operation invocation.
type ClientRequestID struct{}
// ID returns the identifier for the ClientRequestID middleware.
func (r *ClientRequestID) ID() string {
return "ClientRequestID"
}
// HandleBuild attaches a unique operation invocation id for the operation to the request
func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", req)
}
invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
if err != nil {
return out, metadata, err
}
const invocationIDHeader = "Amz-Sdk-Invocation-Id"
req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID)
return next.HandleBuild(ctx, in)
}
// RecordResponseTiming records the response timing for the SDK client requests.
type RecordResponseTiming struct{}
// ID is the middleware identifier
func (a *RecordResponseTiming) ID() string {
return "RecordResponseTiming"
}
// HandleDeserialize calculates response metadata and clock skew
func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
responseAt := sdk.NowTime()
setResponseAt(&metadata, responseAt)
var serverTime time.Time
switch resp := out.RawResponse.(type) {
case *smithyhttp.Response:
respDateHeader := resp.Header.Get("Date")
if len(respDateHeader) == 0 {
break
}
var parseErr error
serverTime, parseErr = smithyhttp.ParseTime(respDateHeader)
if parseErr != nil {
logger := middleware.GetLogger(ctx)
logger.Logf(logging.Warn, "failed to parse response Date header value, got %v",
parseErr.Error())
break
}
setServerTime(&metadata, serverTime)
}
if !serverTime.IsZero() {
attemptSkew := serverTime.Sub(responseAt)
setAttemptSkew(&metadata, attemptSkew)
}
return out, metadata, err
}
type responseAtKey struct{}
// GetResponseAt returns the time response was received at.
func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) {
v, ok = metadata.Get(responseAtKey{}).(time.Time)
return v, ok
}
// setResponseAt sets the response time on the metadata.
func setResponseAt(metadata *middleware.Metadata, v time.Time) {
metadata.Set(responseAtKey{}, v)
}
type serverTimeKey struct{}
// GetServerTime returns the server time for response.
func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
v, ok = metadata.Get(serverTimeKey{}).(time.Time)
return v, ok
}
// setServerTime sets the server time on the metadata.
func setServerTime(metadata *middleware.Metadata, v time.Time) {
metadata.Set(serverTimeKey{}, v)
}
type attemptSkewKey struct{}
// GetAttemptSkew returns Attempt clock skew for response from metadata.
func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
return v, ok
}
// setAttemptSkew sets the attempt clock skew on the metadata.
func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
metadata.Set(attemptSkewKey{}, v)
}
// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack
func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
return stack.Build.Add(&ClientRequestID{}, middleware.After)
}
// AddRecordResponseTiming adds RecordResponseTiming middleware to the
// middleware stack.
func AddRecordResponseTiming(stack *middleware.Stack) error {
return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
}
// rawResponseKey is the accessor key used to store and access the
// raw response within the response metadata.
type rawResponseKey struct{}
// addRawResponse middleware adds raw response on to the metadata
type addRawResponse struct{}
// ID returns the identifier for the addRawResponse middleware.
func (m *addRawResponse) ID() string {
return "AddRawResponseToMetadata"
}
// HandleDeserialize adds raw response on the middleware metadata
func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
metadata.Set(rawResponseKey{}, out.RawResponse)
return out, metadata, err
}
// AddRawResponseToMetadata adds middleware to the middleware stack that
// stores the raw response on the metadata.
func AddRawResponseToMetadata(stack *middleware.Stack) error {
return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
}
// GetRawResponse returns raw response set on metadata
func GetRawResponse(metadata middleware.Metadata) interface{} {
return metadata.Get(rawResponseKey{})
}
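
A sketch of reading these values back once an operation has completed; the inspect helper and the zero-value metadata are purely illustrative (generated clients expose the collected metadata, e.g. as ResultMetadata on operation outputs):

package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func inspect(md middleware.Metadata) {
	if skew, ok := awsmiddleware.GetAttemptSkew(md); ok {
		fmt.Println("approximate clock skew:", skew)
	}
	if raw := awsmiddleware.GetRawResponse(md); raw != nil {
		if resp, ok := raw.(*smithyhttp.Response); ok {
			fmt.Println("HTTP status:", resp.StatusCode)
		}
	}
}

func main() {
	inspect(middleware.Metadata{})
}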

@ -1,24 +0,0 @@
//go:build go1.16
// +build go1.16
package middleware
import "runtime"
func getNormalizedOSName() (os string) {
switch runtime.GOOS {
case "android":
os = "android"
case "linux":
os = "linux"
case "windows":
os = "windows"
case "darwin":
os = "macos"
case "ios":
os = "ios"
default:
os = "other"
}
return os
}

@ -1,24 +0,0 @@
//go:build !go1.16
// +build !go1.16
package middleware
import "runtime"
func getNormalizedOSName() (os string) {
switch runtime.GOOS {
case "android":
os = "android"
case "linux":
os = "linux"
case "windows":
os = "windows"
case "darwin":
// Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
// For now declare this as "other" until we have a better detection mechanism.
fallthrough
default:
os = "other"
}
return os
}

@ -1,27 +0,0 @@
package middleware
import (
"github.com/aws/smithy-go/middleware"
)
// requestIDKey is used to retrieve request id from response metadata
type requestIDKey struct{}
// SetRequestIDMetadata sets the provided request id over middleware metadata
func SetRequestIDMetadata(metadata *middleware.Metadata, id string) {
metadata.Set(requestIDKey{}, id)
}
// GetRequestIDMetadata retrieves the request id from middleware metadata.
// It returns the request id and a bool indicating whether the request id was set.
func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
if !metadata.Has(requestIDKey{}) {
return "", false
}
v, ok := metadata.Get(requestIDKey{}).(string)
if !ok {
return "", true
}
return v, true
}
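
A sketch of the set/get pair; the id value is set by hand here purely to illustrate the lookup (the retriever middleware in the next file does this for real responses):

package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	var md middleware.Metadata
	awsmiddleware.SetRequestIDMetadata(&md, "example-request-id")

	if id, ok := awsmiddleware.GetRequestIDMetadata(md); ok {
		fmt.Println("request id:", id) // request id: example-request-id
	}
}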

@ -1,49 +0,0 @@
package middleware
import (
"context"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// AddRequestIDRetrieverMiddleware adds request id retriever middleware
func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
// insert the request id retriever before operation deserializers so that
// it can capture the request id from both success and error responses
return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
}
type requestIDRetriever struct {
}
// ID returns the middleware identifier
func (m *requestIDRetriever) ID() string {
return "RequestIDRetriever"
}
func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
resp, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
// No raw response to wrap with.
return out, metadata, err
}
// Different headers that can map to the request id
requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
for _, h := range requestIDHeaderList {
// check for headers known to contain Request id
if v := resp.Header.Get(h); len(v) != 0 {
// set reqID on metadata for successful responses.
SetRequestIDMetadata(&metadata, v)
break
}
}
return out, metadata, err
}

@ -1,243 +0,0 @@
package middleware
import (
"context"
"fmt"
"os"
"runtime"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
// SDKAgentKeyType is the metadata type to add to the SDK agent string
type SDKAgentKeyType int
// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
// be mapped to AdditionalMetadata.
const (
_ SDKAgentKeyType = iota
APIMetadata
OperatingSystemMetadata
LanguageMetadata
EnvironmentMetadata
FeatureMetadata
ConfigMetadata
FrameworkMetadata
AdditionalMetadata
ApplicationIdentifier
)
func (k SDKAgentKeyType) string() string {
switch k {
case APIMetadata:
return "api"
case OperatingSystemMetadata:
return "os"
case LanguageMetadata:
return "lang"
case EnvironmentMetadata:
return "exec-env"
case FeatureMetadata:
return "ft"
case ConfigMetadata:
return "cfg"
case FrameworkMetadata:
return "lib"
case ApplicationIdentifier:
return "app"
case AdditionalMetadata:
fallthrough
default:
return "md"
}
}
const execEnvVar = `AWS_EXECUTION_ENV`
// requestUserAgent is a build middleware that sets the User-Agent for the request.
type requestUserAgent struct {
sdkAgent, userAgent *smithyhttp.UserAgentBuilder
}
// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
// request.
//
// User-Agent example:
//
// aws-sdk-go-v2/1.2.3
//
// X-Amz-User-Agent example:
//
// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
func newRequestUserAgent() *requestUserAgent {
userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
addProductName(userAgent)
addProductName(sdkAgent)
r := &requestUserAgent{
sdkAgent: sdkAgent,
userAgent: userAgent,
}
addSDKMetadata(r)
return r
}
func addSDKMetadata(r *requestUserAgent) {
r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH)
if ev := os.Getenv(execEnvVar); len(ev) > 0 {
r.AddSDKAgentKey(EnvironmentMetadata, ev)
}
}
func addProductName(builder *smithyhttp.UserAgentBuilder) {
builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
}
// AddUserAgentKey returns a stack mutator that retrieves or initializes a requestUserAgent middleware and adds the key to the User-Agent.
func AddUserAgentKey(key string) func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
requestUserAgent, err := getOrAddRequestUserAgent(stack)
if err != nil {
return err
}
requestUserAgent.AddUserAgentKey(key)
return nil
}
}
// AddUserAgentKeyValue returns a stack mutator that retrieves or initializes a requestUserAgent middleware and adds the key/value pair to the User-Agent.
func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
requestUserAgent, err := getOrAddRequestUserAgent(stack)
if err != nil {
return err
}
requestUserAgent.AddUserAgentKeyValue(key, value)
return nil
}
}
// AddSDKAgentKey returns a stack mutator that retrieves or initializes a requestUserAgent middleware and adds the typed key to the SDK agent string.
func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
requestUserAgent, err := getOrAddRequestUserAgent(stack)
if err != nil {
return err
}
requestUserAgent.AddSDKAgentKey(keyType, key)
return nil
}
}
// AddSDKAgentKeyValue returns a stack mutator that retrieves or initializes a requestUserAgent middleware and adds the typed key/value pair to the SDK agent string.
func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
requestUserAgent, err := getOrAddRequestUserAgent(stack)
if err != nil {
return err
}
requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
return nil
}
}
// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present.
func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
_, err := getOrAddRequestUserAgent(stack)
return err
}
func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
id := (*requestUserAgent)(nil).ID()
bm, ok := stack.Build.Get(id)
if !ok {
bm = newRequestUserAgent()
err := stack.Build.Add(bm, middleware.After)
if err != nil {
return nil, err
}
}
requestUserAgent, ok := bm.(*requestUserAgent)
if !ok {
return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
}
return requestUserAgent, nil
}
// AddUserAgentKey adds the component identified by name to the User-Agent string.
func (u *requestUserAgent) AddUserAgentKey(key string) {
u.userAgent.AddKey(key)
}
// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
u.userAgent.AddKeyValue(key, value)
}
// AddSDKAgentKey adds the SDK agent component identified by the key type and name to the User-Agent string.
func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
// TODO: should target sdkAgent
u.userAgent.AddKey(keyType.string() + "/" + key)
}
// AddSDKAgentKeyValue adds the SDK agent key/value pair identified by the key type, name, and value to the User-Agent string.
func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
// TODO: should target sdkAgent
u.userAgent.AddKeyValue(keyType.string()+"/"+key, value)
}
// ID returns the name of the middleware.
func (u *requestUserAgent) ID() string {
return "UserAgent"
}
// HandleBuild adds or appends the constructed user agent to the request.
func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
switch req := in.Request.(type) {
case *smithyhttp.Request:
u.addHTTPUserAgent(req)
// TODO: To be re-enabled
// u.addHTTPSDKAgent(req)
default:
return out, metadata, fmt.Errorf("unknown transport type %T", in)
}
return next.HandleBuild(ctx, in)
}
func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
const userAgent = "User-Agent"
updateHTTPHeader(request, userAgent, u.userAgent.Build())
}
func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
const sdkAgent = "X-Amz-User-Agent"
updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
}
func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
var current string
if v := request.Header[header]; len(v) > 0 {
current = v[0]
}
if len(current) > 0 {
current = value + " " + current
} else {
current = value
}
request.Header[header] = append(request.Header[header][:0], current)
}
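
A sketch of tagging every request's User-Agent through config API options; the application name and version are illustrative:

package main

import (
	"context"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	// Each returned mutator runs against every operation's middleware stack.
	_, err := config.LoadDefaultConfig(context.TODO(),
		config.WithAPIOptions([]func(*middleware.Stack) error{
			awsmiddleware.AddUserAgentKeyValue("app/my-service", "1.0.0"),
		}))
	if err != nil {
		log.Fatal(err)
	}
}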

@ -1,54 +0,0 @@
# v1.4.8 (2022-09-14)
* No change notes available for this release.
# v1.4.7 (2022-09-02)
* No change notes available for this release.
# v1.4.6 (2022-08-31)
* No change notes available for this release.
# v1.4.5 (2022-08-29)
* No change notes available for this release.
# v1.4.4 (2022-08-09)
* No change notes available for this release.
# v1.4.3 (2022-06-29)
* No change notes available for this release.
# v1.4.2 (2022-06-07)
* No change notes available for this release.
# v1.4.1 (2022-03-24)
* No change notes available for this release.
# v1.4.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.3.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.2.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.1.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.0.0 (2021-11-06)
* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.
* **Release**: Protocol support has been added for AWS event stream.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,144 +0,0 @@
package eventstream
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
)
type decodedMessage struct {
rawMessage
Headers decodedHeaders `json:"headers"`
}
type jsonMessage struct {
Length json.Number `json:"total_length"`
HeadersLen json.Number `json:"headers_length"`
PreludeCRC json.Number `json:"prelude_crc"`
Headers decodedHeaders `json:"headers"`
Payload []byte `json:"payload"`
CRC json.Number `json:"message_crc"`
}
func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
var jsonMsg jsonMessage
if err = json.Unmarshal(b, &jsonMsg); err != nil {
return err
}
d.Length, err = numAsUint32(jsonMsg.Length)
if err != nil {
return err
}
d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
if err != nil {
return err
}
d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
if err != nil {
return err
}
d.Headers = jsonMsg.Headers
d.Payload = jsonMsg.Payload
d.CRC, err = numAsUint32(jsonMsg.CRC)
if err != nil {
return err
}
return nil
}
func (d *decodedMessage) MarshalJSON() ([]byte, error) {
jsonMsg := jsonMessage{
Length: json.Number(strconv.Itoa(int(d.Length))),
HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
Headers: d.Headers,
Payload: d.Payload,
CRC: json.Number(strconv.Itoa(int(d.CRC))),
}
return json.Marshal(jsonMsg)
}
func numAsUint32(n json.Number) (uint32, error) {
v, err := n.Int64()
if err != nil {
return 0, fmt.Errorf("failed to get int64 json number, %v", err)
}
return uint32(v), nil
}
func (d decodedMessage) Message() Message {
return Message{
Headers: Headers(d.Headers),
Payload: d.Payload,
}
}
type decodedHeaders Headers
func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
var jsonHeaders []struct {
Name string `json:"name"`
Type valueType `json:"type"`
Value interface{} `json:"value"`
}
decoder := json.NewDecoder(bytes.NewReader(b))
decoder.UseNumber()
if err := decoder.Decode(&jsonHeaders); err != nil {
return err
}
var headers Headers
for _, h := range jsonHeaders {
value, err := valueFromType(h.Type, h.Value)
if err != nil {
return err
}
headers.Set(h.Name, value)
}
*hs = decodedHeaders(headers)
return nil
}
func valueFromType(typ valueType, val interface{}) (Value, error) {
switch typ {
case trueValueType:
return BoolValue(true), nil
case falseValueType:
return BoolValue(false), nil
case int8ValueType:
v, err := val.(json.Number).Int64()
return Int8Value(int8(v)), err
case int16ValueType:
v, err := val.(json.Number).Int64()
return Int16Value(int16(v)), err
case int32ValueType:
v, err := val.(json.Number).Int64()
return Int32Value(int32(v)), err
case int64ValueType:
v, err := val.(json.Number).Int64()
return Int64Value(v), err
case bytesValueType:
v, err := base64.StdEncoding.DecodeString(val.(string))
return BytesValue(v), err
case stringValueType:
v, err := base64.StdEncoding.DecodeString(val.(string))
return StringValue(string(v)), err
case timestampValueType:
v, err := val.(json.Number).Int64()
return TimestampValue(timeFromEpochMilli(v)), err
case uuidValueType:
v, err := base64.StdEncoding.DecodeString(val.(string))
var tv UUIDValue
copy(tv[:], v)
return tv, err
default:
panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
}
}

@ -1,218 +0,0 @@
package eventstream
import (
"bytes"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/aws/smithy-go/logging"
"hash"
"hash/crc32"
"io"
)
// DecoderOptions holds the Decoder configuration options.
type DecoderOptions struct {
Logger logging.Logger
LogMessages bool
}
// Decoder provides decoding of Event Stream messages.
type Decoder struct {
options DecoderOptions
}
// NewDecoder initializes and returns a Decoder for decoding event
// stream messages from the reader provided to Decode.
func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder {
options := DecoderOptions{}
for _, fn := range optFns {
fn(&options)
}
return &Decoder{
options: options,
}
}
// Decode attempts to decode a single message from the event stream reader.
// Will return the event stream message, or an error if decodeMessage fails to read
// the message from the stream.
//
// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers
// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying
// payloadBuf byte slice.
func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
if d.options.Logger != nil && d.options.LogMessages {
debugMsgBuf := bytes.NewBuffer(nil)
reader = io.TeeReader(reader, debugMsgBuf)
defer func() {
logMessageDecode(d.options.Logger, debugMsgBuf, m, err)
}()
}
m, err = decodeMessage(reader, payloadBuf)
return m, err
}
// decodeMessage attempts to decode a single message from the event stream reader.
// Will return the event stream message, or an error if decodeMessage fails to read
// the message from the reader.
func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) {
crc := crc32.New(crc32IEEETable)
hashReader := io.TeeReader(reader, crc)
prelude, err := decodePrelude(hashReader, crc)
if err != nil {
return Message{}, err
}
if prelude.HeadersLen > 0 {
lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
m.Headers, err = decodeHeaders(lr)
if err != nil {
return Message{}, err
}
}
if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
if err != nil {
return Message{}, err
}
m.Payload = buf
}
msgCRC := crc.Sum32()
if err := validateCRC(reader, msgCRC); err != nil {
return Message{}, err
}
return m, nil
}
func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
w := bytes.NewBuffer(nil)
defer func() { logger.Logf(logging.Debug, w.String()) }()
fmt.Fprintf(w, "Raw message:\n%s\n",
hex.Dump(msgBuf.Bytes()))
if decodeErr != nil {
fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr)
return
}
rawMsg, err := msg.rawMessage()
if err != nil {
fmt.Fprintf(w, "failed to create raw message, %v\n", err)
return
}
decodedMsg := decodedMessage{
rawMessage: rawMsg,
Headers: decodedHeaders(msg.Headers),
}
fmt.Fprintf(w, "Decoded message:\n")
encoder := json.NewEncoder(w)
if err := encoder.Encode(decodedMsg); err != nil {
fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
}
}
func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
var p messagePrelude
var err error
p.Length, err = decodeUint32(r)
if err != nil {
return messagePrelude{}, err
}
p.HeadersLen, err = decodeUint32(r)
if err != nil {
return messagePrelude{}, err
}
if err := p.ValidateLens(); err != nil {
return messagePrelude{}, err
}
preludeCRC := crc.Sum32()
if err := validateCRC(r, preludeCRC); err != nil {
return messagePrelude{}, err
}
p.PreludeCRC = preludeCRC
return p, nil
}
func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
w := bytes.NewBuffer(buf[0:0])
_, err := io.Copy(w, r)
return w.Bytes(), err
}
func decodeUint8(r io.Reader) (uint8, error) {
type byteReader interface {
ReadByte() (byte, error)
}
if br, ok := r.(byteReader); ok {
v, err := br.ReadByte()
return v, err
}
var b [1]byte
_, err := io.ReadFull(r, b[:])
return b[0], err
}
func decodeUint16(r io.Reader) (uint16, error) {
var b [2]byte
bs := b[:]
_, err := io.ReadFull(r, bs)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint16(bs), nil
}
func decodeUint32(r io.Reader) (uint32, error) {
var b [4]byte
bs := b[:]
_, err := io.ReadFull(r, bs)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(bs), nil
}
func decodeUint64(r io.Reader) (uint64, error) {
var b [8]byte
bs := b[:]
_, err := io.ReadFull(r, bs)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint64(bs), nil
}
func validateCRC(r io.Reader, expect uint32) error {
msgCRC, err := decodeUint32(r)
if err != nil {
return err
}
if msgCRC != expect {
return ChecksumError{}
}
return nil
}

@ -1,167 +0,0 @@
package eventstream
import (
"bytes"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/aws/smithy-go/logging"
"hash"
"hash/crc32"
"io"
)
// EncoderOptions is the configuration options for Encoder.
type EncoderOptions struct {
Logger logging.Logger
LogMessages bool
}
// Encoder provides EventStream message encoding.
type Encoder struct {
options EncoderOptions
headersBuf *bytes.Buffer
messageBuf *bytes.Buffer
}
// NewEncoder initializes and returns an Encoder to encode Event Stream
// messages.
func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder {
o := EncoderOptions{}
for _, fn := range optFns {
fn(&o)
}
return &Encoder{
options: o,
headersBuf: bytes.NewBuffer(nil),
messageBuf: bytes.NewBuffer(nil),
}
}
// Encode encodes a single EventStream message to the io.Writer the Encoder
// was created with. An error is returned if writing the message fails.
func (e *Encoder) Encode(w io.Writer, msg Message) (err error) {
e.headersBuf.Reset()
e.messageBuf.Reset()
var writer io.Writer = e.messageBuf
if e.options.Logger != nil && e.options.LogMessages {
encodeMsgBuf := bytes.NewBuffer(nil)
writer = io.MultiWriter(writer, encodeMsgBuf)
defer func() {
logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err)
}()
}
if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil {
return err
}
crc := crc32.New(crc32IEEETable)
hashWriter := io.MultiWriter(writer, crc)
headersLen := uint32(e.headersBuf.Len())
payloadLen := uint32(len(msg.Payload))
if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
return err
}
if headersLen > 0 {
if _, err = io.Copy(hashWriter, e.headersBuf); err != nil {
return err
}
}
if payloadLen > 0 {
if _, err = hashWriter.Write(msg.Payload); err != nil {
return err
}
}
msgCRC := crc.Sum32()
if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil {
return err
}
_, err = io.Copy(w, e.messageBuf)
return err
}
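// A minimal usage sketch; the message is assembled in an internal buffer and
// only copied to w once encoding succeeds:
//
//	encoder := NewEncoder()
//	msg := Message{
//		Headers: Headers{{Name: ":event-type", Value: StringValue("Stats")}},
//		Payload: []byte(`{"BytesScanned":1024}`),
//	}
//	var buf bytes.Buffer
//	if err := encoder.Encode(&buf, msg); err != nil {
//		return err
//	}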
func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
w := bytes.NewBuffer(nil)
defer func() { logger.Logf(logging.Debug, w.String()) }()
fmt.Fprintf(w, "Message to encode:\n")
encoder := json.NewEncoder(w)
if err := encoder.Encode(msg); err != nil {
fmt.Fprintf(w, "Failed to get encoded message, %v\n", err)
}
if encodeErr != nil {
fmt.Fprintf(w, "Encode error: %v\n", encodeErr)
return
}
fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes()))
}
func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
p := messagePrelude{
Length: minMsgLen + headersLen + payloadLen,
HeadersLen: headersLen,
}
if err := p.ValidateLens(); err != nil {
return err
}
err := binaryWriteFields(w, binary.BigEndian,
p.Length,
p.HeadersLen,
)
if err != nil {
return err
}
p.PreludeCRC = crc.Sum32()
err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
if err != nil {
return err
}
return nil
}
// EncodeHeaders writes the header values to the writer encoded in the event
// stream format. Returns an error if a header fails to encode.
func EncodeHeaders(w io.Writer, headers Headers) error {
for _, h := range headers {
hn := headerName{
Len: uint8(len(h.Name)),
}
copy(hn.Name[:hn.Len], h.Name)
if err := hn.encode(w); err != nil {
return err
}
if err := h.Value.encode(w); err != nil {
return err
}
}
return nil
}
func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
for _, v := range vs {
if err := binary.Write(w, order, v); err != nil {
return err
}
}
return nil
}

@ -1,23 +0,0 @@
package eventstream
import "fmt"
// LengthError provides the error for items being larger than a maximum length.
type LengthError struct {
Part string
Want int
Have int
Value interface{}
}
func (e LengthError) Error() string {
return fmt.Sprintf("%s length invalid, %d/%d, %v",
e.Part, e.Want, e.Have, e.Value)
}
// ChecksumError provides the error for message checksum invalidation errors.
type ChecksumError struct{}
func (e ChecksumError) Error() string {
return "message checksum mismatch"
}

@ -1,24 +0,0 @@
package eventstreamapi
// EventStream headers with specific meaning to async API functionality.
const (
ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
DateHeader = `:date` // Date header for signature
ContentTypeHeader = `:content-type` // message payload content-type
// Message header and values
MessageTypeHeader = `:message-type` // Identifies type of message.
EventMessageType = `event`
ErrorMessageType = `error`
ExceptionMessageType = `exception`
// Message Events
EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
// Message Error
ErrorCodeHeader = `:error-code`
ErrorMessageHeader = `:error-message`
// Message Exception
ExceptionTypeHeader = `:exception-type`
)
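// A hedged sketch of how these headers are typically set on an event message
// (assuming the eventstream package is imported and msg is an
// eventstream.Message; the "Stats" event type is illustrative):
//
//	msg.Headers.Set(MessageTypeHeader, eventstream.StringValue(EventMessageType))
//	msg.Headers.Set(EventTypeHeader, eventstream.StringValue("Stats"))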

@ -1,71 +0,0 @@
package eventstreamapi
import (
"context"
"fmt"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
)
type eventStreamWriterKey struct{}
// GetInputStreamWriter returns the io.PipeWriter used for the operation's input event stream.
func GetInputStreamWriter(ctx context.Context) io.WriteCloser {
writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser)
return writeCloser
}
func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context {
return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser)
}
// InitializeStreamWriter is a Finalize middleware that initializes an in-memory pipe for sending event stream messages
// via the HTTP request body.
type InitializeStreamWriter struct{}
// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack.
func AddInitializeStreamWriter(stack *middleware.Stack) error {
return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After)
}
// ID returns the identifier for the middleware.
func (i *InitializeStreamWriter) ID() string {
return "InitializeStreamWriter"
}
// HandleFinalize is the middleware implementation.
func (i *InitializeStreamWriter) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
}
inputReader, inputWriter := io.Pipe()
defer func() {
if err == nil {
return
}
_ = inputReader.Close()
_ = inputWriter.Close()
}()
request, err = request.SetStream(inputReader)
if err != nil {
return out, metadata, err
}
in.Request = request
ctx = setInputStreamWriter(ctx, inputWriter)
out, metadata, err = next.HandleFinalize(ctx, in)
if err != nil {
return out, metadata, err
}
return out, metadata, err
}
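// A hedged sketch of writing to the pipe after this middleware has run
// (assuming the eventstream package is imported and msg is an
// eventstream.Message built by the caller):
//
//	writer := GetInputStreamWriter(ctx)
//	encoder := eventstream.NewEncoder()
//	if err := encoder.Encode(writer, msg); err != nil {
//		return err
//	}
//	_ = writer.Close() // signals the end of the input event stream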

@ -1,13 +0,0 @@
//go:build go1.18
// +build go1.18
package eventstreamapi
import smithyhttp "github.com/aws/smithy-go/transport/http"
// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
//
// This operation is a no-op for Go 1.18 and above.
func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
return nil
}

@ -1,12 +0,0 @@
//go:build !go1.18
// +build !go1.18
package eventstreamapi
import smithyhttp "github.com/aws/smithy-go/transport/http"
// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
r.Header.Set("Expect", "100-continue")
return nil
}

@ -1,6 +0,0 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package eventstream
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.4.8"

@ -1,175 +0,0 @@
package eventstream
import (
"encoding/binary"
"fmt"
"io"
)
// Headers are a collection of EventStream header values.
type Headers []Header
// Header is a single EventStream Key Value header pair.
type Header struct {
Name string
Value Value
}
// Set associates the name with a value. If the header name already exists in
// the Headers the value will be replaced with the new one.
func (hs *Headers) Set(name string, value Value) {
var i int
for ; i < len(*hs); i++ {
if (*hs)[i].Name == name {
(*hs)[i].Value = value
return
}
}
*hs = append(*hs, Header{
Name: name, Value: value,
})
}
// Get returns the Value associated with the header. Nil is returned if the
// value does not exist.
func (hs Headers) Get(name string) Value {
for i := 0; i < len(hs); i++ {
if h := hs[i]; h.Name == name {
return h.Value
}
}
return nil
}
// Del deletes the value in the Headers if it exists.
func (hs *Headers) Del(name string) {
	for i := 0; i < len(*hs); i++ {
		if (*hs)[i].Name == name {
			copy((*hs)[i:], (*hs)[i+1:])
			(*hs) = (*hs)[:len(*hs)-1]
			// Recheck this index, since the next element shifted into it.
			i--
		}
	}
}
// Clone returns a deep copy of the headers
func (hs Headers) Clone() Headers {
o := make(Headers, 0, len(hs))
for _, h := range hs {
o.Set(h.Name, h.Value)
}
return o
}
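// A minimal usage sketch of the Headers accessors:
//
//	var hs Headers
//	hs.Set(":content-type", StringValue("application/json"))
//	if v := hs.Get(":content-type"); v != nil {
//		fmt.Println(v.String()) // application/json
//	}
//	hs.Del(":content-type")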
func decodeHeaders(r io.Reader) (Headers, error) {
hs := Headers{}
for {
name, err := decodeHeaderName(r)
if err != nil {
if err == io.EOF {
// EOF while getting header name means no more headers
break
}
return nil, err
}
value, err := decodeHeaderValue(r)
if err != nil {
return nil, err
}
hs.Set(name, value)
}
return hs, nil
}
func decodeHeaderName(r io.Reader) (string, error) {
var n headerName
var err error
n.Len, err = decodeUint8(r)
if err != nil {
return "", err
}
name := n.Name[:n.Len]
if _, err := io.ReadFull(r, name); err != nil {
return "", err
}
return string(name), nil
}
func decodeHeaderValue(r io.Reader) (Value, error) {
var raw rawValue
typ, err := decodeUint8(r)
if err != nil {
return nil, err
}
raw.Type = valueType(typ)
var v Value
switch raw.Type {
case trueValueType:
v = BoolValue(true)
case falseValueType:
v = BoolValue(false)
case int8ValueType:
var tv Int8Value
err = tv.decode(r)
v = tv
case int16ValueType:
var tv Int16Value
err = tv.decode(r)
v = tv
case int32ValueType:
var tv Int32Value
err = tv.decode(r)
v = tv
case int64ValueType:
var tv Int64Value
err = tv.decode(r)
v = tv
case bytesValueType:
var tv BytesValue
err = tv.decode(r)
v = tv
case stringValueType:
var tv StringValue
err = tv.decode(r)
v = tv
case timestampValueType:
var tv TimestampValue
err = tv.decode(r)
v = tv
case uuidValueType:
var tv UUIDValue
err = tv.decode(r)
v = tv
default:
panic(fmt.Sprintf("unknown value type %d", raw.Type))
}
// Error could be EOF, let caller deal with it
return v, err
}
const maxHeaderNameLen = 255
type headerName struct {
Len uint8
Name [maxHeaderNameLen]byte
}
func (v headerName) encode(w io.Writer) error {
if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
return err
}
_, err := w.Write(v.Name[:v.Len])
return err
}

@ -1,521 +0,0 @@
package eventstream
import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strconv"
"time"
)
const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
// valueType is the EventStream header value type.
type valueType uint8
// Header value types
const (
trueValueType valueType = iota
falseValueType
int8ValueType // Byte
int16ValueType // Short
int32ValueType // Integer
int64ValueType // Long
bytesValueType
stringValueType
timestampValueType
uuidValueType
)
func (t valueType) String() string {
switch t {
case trueValueType:
return "bool"
case falseValueType:
return "bool"
case int8ValueType:
return "int8"
case int16ValueType:
return "int16"
case int32ValueType:
return "int32"
case int64ValueType:
return "int64"
case bytesValueType:
return "byte_array"
case stringValueType:
return "string"
case timestampValueType:
return "timestamp"
case uuidValueType:
return "uuid"
default:
return fmt.Sprintf("unknown value type %d", uint8(t))
}
}
type rawValue struct {
Type valueType
Len uint16 // Only set for variable length slices
Value []byte // byte representation of value, BigEndian encoding.
}
func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
return binaryWriteFields(w, binary.BigEndian,
r.Type,
v,
)
}
func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
	if err := binary.Write(w, binary.BigEndian, r.Type); err != nil {
		return err
	}
	_, err := w.Write(v)
	return err
}
func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
if len(v) > maxHeaderValueLen {
return LengthError{
Part: "header value",
Want: maxHeaderValueLen, Have: len(v),
Value: v,
}
}
r.Len = uint16(len(v))
err := binaryWriteFields(w, binary.BigEndian,
r.Type,
r.Len,
)
if err != nil {
return err
}
_, err = w.Write(v)
return err
}
func (r rawValue) encodeString(w io.Writer, v string) error {
if len(v) > maxHeaderValueLen {
return LengthError{
Part: "header value",
Want: maxHeaderValueLen, Have: len(v),
Value: v,
}
}
r.Len = uint16(len(v))
type stringWriter interface {
WriteString(string) (int, error)
}
err := binaryWriteFields(w, binary.BigEndian,
r.Type,
r.Len,
)
if err != nil {
return err
}
if sw, ok := w.(stringWriter); ok {
_, err = sw.WriteString(v)
} else {
_, err = w.Write([]byte(v))
}
return err
}
func decodeFixedBytesValue(r io.Reader, buf []byte) error {
_, err := io.ReadFull(r, buf)
return err
}
func decodeBytesValue(r io.Reader) ([]byte, error) {
var raw rawValue
var err error
raw.Len, err = decodeUint16(r)
if err != nil {
return nil, err
}
buf := make([]byte, raw.Len)
_, err = io.ReadFull(r, buf)
if err != nil {
return nil, err
}
return buf, nil
}
func decodeStringValue(r io.Reader) (string, error) {
v, err := decodeBytesValue(r)
return string(v), err
}
// Value represents the abstract header value.
type Value interface {
Get() interface{}
String() string
valueType() valueType
encode(io.Writer) error
}
// A BoolValue provides eventstream encoding, and representation
// of a Go bool value.
type BoolValue bool
// Get returns the underlying type
func (v BoolValue) Get() interface{} {
return bool(v)
}
// valueType returns the EventStream header value type value.
func (v BoolValue) valueType() valueType {
if v {
return trueValueType
}
return falseValueType
}
func (v BoolValue) String() string {
return strconv.FormatBool(bool(v))
}
// encode encodes the BoolValue into an eventstream binary value
// representation.
func (v BoolValue) encode(w io.Writer) error {
return binary.Write(w, binary.BigEndian, v.valueType())
}
// An Int8Value provides eventstream encoding, and representation of a Go
// int8 value.
type Int8Value int8
// Get returns the underlying value.
func (v Int8Value) Get() interface{} {
return int8(v)
}
// valueType returns the EventStream header value type value.
func (Int8Value) valueType() valueType {
return int8ValueType
}
func (v Int8Value) String() string {
return fmt.Sprintf("0x%02x", int8(v))
}
// encode encodes the Int8Value into an eventstream binary value
// representation.
func (v Int8Value) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeScalar(w, v)
}
func (v *Int8Value) decode(r io.Reader) error {
n, err := decodeUint8(r)
if err != nil {
return err
}
*v = Int8Value(n)
return nil
}
// An Int16Value provides eventstream encoding, and representation of a Go
// int16 value.
type Int16Value int16
// Get returns the underlying value.
func (v Int16Value) Get() interface{} {
return int16(v)
}
// valueType returns the EventStream header value type value.
func (Int16Value) valueType() valueType {
return int16ValueType
}
func (v Int16Value) String() string {
return fmt.Sprintf("0x%04x", int16(v))
}
// encode encodes the Int16Value into an eventstream binary value
// representation.
func (v Int16Value) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeScalar(w, v)
}
func (v *Int16Value) decode(r io.Reader) error {
n, err := decodeUint16(r)
if err != nil {
return err
}
*v = Int16Value(n)
return nil
}
// An Int32Value provides eventstream encoding, and representation of a Go
// int32 value.
type Int32Value int32
// Get returns the underlying value.
func (v Int32Value) Get() interface{} {
return int32(v)
}
// valueType returns the EventStream header value type value.
func (Int32Value) valueType() valueType {
return int32ValueType
}
func (v Int32Value) String() string {
return fmt.Sprintf("0x%08x", int32(v))
}
// encode encodes the Int32Value into an eventstream binary value
// representation.
func (v Int32Value) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeScalar(w, v)
}
func (v *Int32Value) decode(r io.Reader) error {
n, err := decodeUint32(r)
if err != nil {
return err
}
*v = Int32Value(n)
return nil
}
// An Int64Value provides eventstream encoding, and representation of a Go
// int64 value.
type Int64Value int64
// Get returns the underlying value.
func (v Int64Value) Get() interface{} {
return int64(v)
}
// valueType returns the EventStream header value type value.
func (Int64Value) valueType() valueType {
return int64ValueType
}
func (v Int64Value) String() string {
return fmt.Sprintf("0x%016x", int64(v))
}
// encode encodes the Int64Value into an eventstream binary value
// representation.
func (v Int64Value) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeScalar(w, v)
}
func (v *Int64Value) decode(r io.Reader) error {
n, err := decodeUint64(r)
if err != nil {
return err
}
*v = Int64Value(n)
return nil
}
// A BytesValue provides eventstream encoding, and representation of a Go
// byte slice.
type BytesValue []byte
// Get returns the underlying value.
func (v BytesValue) Get() interface{} {
return []byte(v)
}
// valueType returns the EventStream header value type value.
func (BytesValue) valueType() valueType {
return bytesValueType
}
func (v BytesValue) String() string {
return base64.StdEncoding.EncodeToString([]byte(v))
}
// encode encodes the BytesValue into an eventstream binary value
// representation.
func (v BytesValue) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeBytes(w, []byte(v))
}
func (v *BytesValue) decode(r io.Reader) error {
buf, err := decodeBytesValue(r)
if err != nil {
return err
}
*v = BytesValue(buf)
return nil
}
// A StringValue provides eventstream encoding, and representation of a Go
// string.
type StringValue string
// Get returns the underlying value.
func (v StringValue) Get() interface{} {
return string(v)
}
// valueType returns the EventStream header value type value.
func (StringValue) valueType() valueType {
return stringValueType
}
func (v StringValue) String() string {
return string(v)
}
// encode encodes the StringValue into an eventstream binary value
// representation.
func (v StringValue) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeString(w, string(v))
}
func (v *StringValue) decode(r io.Reader) error {
s, err := decodeStringValue(r)
if err != nil {
return err
}
*v = StringValue(s)
return nil
}
// A TimestampValue provides eventstream encoding, and representation of a Go
// timestamp.
type TimestampValue time.Time
// Get returns the underlying value.
func (v TimestampValue) Get() interface{} {
return time.Time(v)
}
// valueType returns the EventStream header value type value.
func (TimestampValue) valueType() valueType {
return timestampValueType
}
func (v TimestampValue) epochMilli() int64 {
nano := time.Time(v).UnixNano()
msec := nano / int64(time.Millisecond)
return msec
}
func (v TimestampValue) String() string {
msec := v.epochMilli()
return strconv.FormatInt(msec, 10)
}
// encode encodes the TimestampValue into an eventstream binary value
// representation.
func (v TimestampValue) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
msec := v.epochMilli()
return raw.encodeScalar(w, msec)
}
func (v *TimestampValue) decode(r io.Reader) error {
n, err := decodeUint64(r)
if err != nil {
return err
}
*v = TimestampValue(timeFromEpochMilli(int64(n)))
return nil
}
// MarshalJSON implements the json.Marshaler interface
func (v TimestampValue) MarshalJSON() ([]byte, error) {
return []byte(v.String()), nil
}
func timeFromEpochMilli(t int64) time.Time {
secs := t / 1e3
msec := t % 1e3
return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
}
// A UUIDValue provides eventstream encoding, and representation of a UUID
// value.
type UUIDValue [16]byte
// Get returns the underlying value.
func (v UUIDValue) Get() interface{} {
return v[:]
}
// valueType returns the EventStream header value type value.
func (UUIDValue) valueType() valueType {
return uuidValueType
}
func (v UUIDValue) String() string {
var scratch [36]byte
const dash = '-'
hex.Encode(scratch[:8], v[0:4])
scratch[8] = dash
hex.Encode(scratch[9:13], v[4:6])
scratch[13] = dash
hex.Encode(scratch[14:18], v[6:8])
scratch[18] = dash
hex.Encode(scratch[19:23], v[8:10])
scratch[23] = dash
hex.Encode(scratch[24:], v[10:])
return string(scratch[:])
}
// encode encodes the UUIDValue into an eventstream binary value
// representation.
func (v UUIDValue) encode(w io.Writer) error {
raw := rawValue{
Type: v.valueType(),
}
return raw.encodeFixedSlice(w, v[:])
}
func (v *UUIDValue) decode(r io.Reader) error {
tv := (*v)[:]
return decodeFixedBytesValue(r, tv)
}
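// A minimal sketch of constructing header values and reading them back
// through the Value interface (the printed forms follow each String method):
//
//	vals := []Value{
//		BoolValue(true),      // "true"
//		Int32Value(42),       // "0x0000002a"
//		StringValue("hello"), // "hello"
//	}
//	for _, v := range vals {
//		fmt.Println(v.String(), v.Get())
//	}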

@ -1,117 +0,0 @@
package eventstream
import (
"bytes"
"encoding/binary"
"hash/crc32"
)
const preludeLen = 8
const preludeCRCLen = 4
const msgCRCLen = 4
const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
const maxPayloadLen = 1024 * 1024 * 16 // 16MB
const maxHeadersLen = 1024 * 128 // 128KB
const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
// A Message provides the eventstream message representation.
type Message struct {
Headers Headers
Payload []byte
}
func (m *Message) rawMessage() (rawMessage, error) {
var raw rawMessage
if len(m.Headers) > 0 {
var headers bytes.Buffer
if err := EncodeHeaders(&headers, m.Headers); err != nil {
return rawMessage{}, err
}
raw.Headers = headers.Bytes()
raw.HeadersLen = uint32(len(raw.Headers))
}
raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
hash := crc32.New(crc32IEEETable)
binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
raw.PreludeCRC = hash.Sum32()
binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
if raw.HeadersLen > 0 {
hash.Write(raw.Headers)
}
// Read payload bytes and update hash for it as well.
if len(m.Payload) > 0 {
raw.Payload = m.Payload
hash.Write(raw.Payload)
}
raw.CRC = hash.Sum32()
return raw, nil
}
// Clone returns a deep copy of the message.
func (m Message) Clone() Message {
var payload []byte
if m.Payload != nil {
payload = make([]byte, len(m.Payload))
copy(payload, m.Payload)
}
return Message{
Headers: m.Headers.Clone(),
Payload: payload,
}
}
type messagePrelude struct {
Length uint32
HeadersLen uint32
PreludeCRC uint32
}
func (p messagePrelude) PayloadLen() uint32 {
return p.Length - p.HeadersLen - minMsgLen
}
func (p messagePrelude) ValidateLens() error {
if p.Length == 0 || p.Length > maxMsgLen {
return LengthError{
Part: "message prelude",
Want: maxMsgLen,
Have: int(p.Length),
}
}
if p.HeadersLen > maxHeadersLen {
return LengthError{
Part: "message headers",
Want: maxHeadersLen,
Have: int(p.HeadersLen),
}
}
if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
return LengthError{
Part: "message payload",
Want: maxPayloadLen,
Have: int(payloadLen),
}
}
return nil
}
type rawMessage struct {
messagePrelude
Headers []byte
Payload []byte
CRC uint32
}

@ -1,61 +0,0 @@
package query
import (
"fmt"
"net/url"
)
// Array represents the encoding of Query lists and sets. A Query array is a
// representation of a list of values of a fixed type. A serialized array might
// look like the following:
//
// ListName.member.1=foo
// &ListName.member.2=bar
// &ListName.member.3=baz
type Array struct {
// The query values to add the array to.
values url.Values
// The array's prefix, which includes the names of all parent structures
// and ends with the name of the list. For example, the prefix might be
// "ParentStructure.ListName". This prefix will be used to form the full
// keys for each element in the list. For example, an entry might have the
// key "ParentStructure.ListName.member.MemberName.1".
//
// While this is currently represented as a string that gets added to, it
// could also be represented as a stack that only gets condensed into a
// string when a finalized key is created. This could potentially reduce
// allocations.
prefix string
// Whether the list is flat or not. A list that is not flat will produce the
// following entry in the url.Values for a given element:
// ListName.MemberName.1=value
// A list that is flat will produce the following:
// ListName.1=value
flat bool
// The location name of the member. In most cases this should be "member".
memberName string
// Elements are stored in values, so we keep track of the list size here.
size int32
}
func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
return &Array{
values: values,
prefix: prefix,
flat: flat,
memberName: memberName,
}
}
// Value adds a new element to the Query Array. Returns a Value type used to
// encode the array element.
func (a *Array) Value() Value {
// Query lists start at 1, so adjust the size first
a.size++
prefix := a.prefix
if !a.flat {
prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
}
// Lists can't have flat members
return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
}
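// A minimal sketch of list encoding through the package's Encoder (ListName
// and member are illustrative names):
//
//	var body bytes.Buffer
//	encoder := NewEncoder(&body)
//	list := encoder.Object().Key("ListName").Array("member")
//	list.Value().String("foo")
//	list.Value().String("bar")
//	_ = encoder.Encode() // ListName.member.1=foo&ListName.member.2=bar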

@ -1,80 +0,0 @@
package query
import (
"io"
"net/url"
"sort"
)
// Encoder is a Query encoder that supports construction of Query body
// values using methods.
type Encoder struct {
// The query values that will be built up to manage encoding.
values url.Values
// The writer that the encoded body will be written to.
writer io.Writer
Value
}
// NewEncoder returns a new Query body encoder
func NewEncoder(writer io.Writer) *Encoder {
values := url.Values{}
return &Encoder{
values: values,
writer: writer,
Value: newBaseValue(values),
}
}
// Encode encodes the collected query values and writes the serialized
// representation to the Encoder's writer. Returns an error if writing fails.
func (e Encoder) Encode() error {
ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
if !ok {
// Fall back to less optimal byte slice casting if WriteString isn't available.
ws = &wrapWriteString{writer: e.writer}
}
// Get the keys and sort them to have a stable output
keys := make([]string, 0, len(e.values))
for k := range e.values {
keys = append(keys, k)
}
sort.Strings(keys)
isFirstEntry := true
for _, key := range keys {
queryValues := e.values[key]
escapedKey := url.QueryEscape(key)
for _, value := range queryValues {
if !isFirstEntry {
if _, err := ws.WriteString(`&`); err != nil {
return err
}
} else {
isFirstEntry = false
}
if _, err := ws.WriteString(escapedKey); err != nil {
return err
}
if _, err := ws.WriteString(`=`); err != nil {
return err
}
if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
return err
}
}
}
return nil
}
// wrapWriteString wraps an io.Writer to provide a WriteString method
// where one is not available.
type wrapWriteString struct {
writer io.Writer
}
// WriteString writes a string to the wrapped writer by casting it to
// a byte slice first.
func (w wrapWriteString) WriteString(v string) (int, error) {
return w.writer.Write([]byte(v))
}
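// A minimal sketch of encoding a flat Query body (Action and Version are
// illustrative parameter names; keys are written in sorted order):
//
//	var body bytes.Buffer
//	encoder := NewEncoder(&body)
//	encoder.Object().Key("Action").String("DescribeInstances")
//	encoder.Object().Key("Version").String("2016-11-15")
//	if err := encoder.Encode(); err != nil {
//		return err
//	}
//	// body now holds: Action=DescribeInstances&Version=2016-11-15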

@ -1,78 +0,0 @@
package query
import (
"fmt"
"net/url"
)
// Map represents the encoding of Query maps. A Query map is a representation
// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
// A Map differs from an Object in that the set of keys is not fixed, in that
// the values must all be of the same type, and that map entries are ordered.
// A serialized map might look like the following:
//
// MapName.entry.1.key=Foo
// &MapName.entry.1.value=spam
// &MapName.entry.2.key=Bar
// &MapName.entry.2.value=eggs
type Map struct {
// The query values to add the map to.
values url.Values
// The map's prefix, which includes the names of all parent structures
// and ends with the name of the object. For example, the prefix might be
// "ParentStructure.MapName". This prefix will be used to form the full
// keys for each key-value pair of the map. For example, a value might have
// the key "ParentStructure.MapName.1.value".
//
// While this is currently represented as a string that gets added to, it
// could also be represented as a stack that only gets condensed into a
// string when a finalized key is created. This could potentially reduce
// allocations.
prefix string
// Whether the map is flat or not. A map that is not flat will produce the
// following entries to the url.Values for a given key-value pair:
// MapName.entry.1.KeyLocationName=mykey
// MapName.entry.1.ValueLocationName=myvalue
// A map that is flat will produce the following:
// MapName.1.KeyLocationName=mykey
// MapName.1.ValueLocationName=myvalue
flat bool
// The location name of the key. In most cases this should be "key".
keyLocationName string
// The location name of the value. In most cases this should be "value".
valueLocationName string
// Elements are stored in values, so we keep track of the list size here.
size int32
}
func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
return &Map{
values: values,
prefix: prefix,
flat: flat,
keyLocationName: keyLocationName,
valueLocationName: valueLocationName,
}
}
// Key adds the given named key to the Query map.
// Returns a Value encoder that should be used to encode a Query value type.
func (m *Map) Key(name string) Value {
// Query lists start at 1, so adjust the size first
m.size++
var key string
var value string
if m.flat {
key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
} else {
key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
}
// The key can only be a string, so we just go ahead and set it here
newValue(m.values, key, false).String(name)
// Maps can't have flat members
return newValue(m.values, value, false)
}
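// A minimal sketch of map encoding (Attributes and the entry values are
// illustrative):
//
//	var body bytes.Buffer
//	encoder := NewEncoder(&body)
//	m := encoder.Object().Key("Attributes").Map("key", "value")
//	m.Key("Color").String("red")
//	m.Key("Size").String("large")
//	// Produces Attributes.entry.1.key=Color, Attributes.entry.1.value=red,
//	// Attributes.entry.2.key=Size, Attributes.entry.2.value=large.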

@ -1,62 +0,0 @@
package query
import (
"context"
"fmt"
"io/ioutil"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
// operation serializer that will convert the query request body to a GET
// operation with the query message in the HTTP request querystring.
func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
}
type asGetRequest struct{}
func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
func (m *asGetRequest) HandleSerialize(
ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
req, ok := input.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
}
req.Method = "GET"
// If the stream is not set, nothing else to do.
stream := req.GetStream()
if stream == nil {
return next.HandleSerialize(ctx, input)
}
// Clear the stream since there will not be any body.
req.Header.Del("Content-Type")
req, err = req.SetStream(nil)
if err != nil {
return out, metadata, fmt.Errorf("unable update request body %w", err)
}
input.Request = req
// Update request query with the body's query string value.
delim := ""
if len(req.URL.RawQuery) != 0 {
delim = "&"
}
b, err := ioutil.ReadAll(stream)
if err != nil {
return out, metadata, fmt.Errorf("unable to get request body %w", err)
}
req.URL.RawQuery += delim + string(b)
return next.HandleSerialize(ctx, input)
}

@ -1,56 +0,0 @@
package query
import (
"fmt"
"net/url"
)
// Object represents the encoding of Query structures and unions. A Query
// object is a representation of a mapping of string keys to arbitrary
// values where there is a fixed set of keys whose values each have their
// own known type. A serialized object might look like the following:
//
// ObjectName.Foo=value
// &ObjectName.Bar=5
type Object struct {
// The query values to add the object to.
values url.Values
// The object's prefix, which includes the names of all parent structures
// and ends with the name of the object. For example, the prefix might be
// "ParentStructure.ObjectName". This prefix will be used to form the full
// keys for each member of the object. For example, a member might have the
// key "ParentStructure.ObjectName.MemberName".
//
// While this is currently represented as a string that gets added to, it
// could also be represented as a stack that only gets condensed into a
// string when a finalized key is created. This could potentially reduce
// allocations.
prefix string
}
func newObject(values url.Values, prefix string) *Object {
return &Object{
values: values,
prefix: prefix,
}
}
// Key adds the given named key to the Query object.
// Returns a Value encoder that should be used to encode a Query value type.
func (o *Object) Key(name string) Value {
return o.key(name, false)
}
// FlatKey adds the given named key to the Query object.
// Returns a Value encoder that should be used to encode a Query value type. The
// value will be flattened if it is a map or array.
func (o *Object) FlatKey(name string) Value {
return o.key(name, true)
}
func (o *Object) key(name string, flatValue bool) Value {
if o.prefix != "" {
return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
}
return newValue(o.values, name, flatValue)
}
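// A minimal sketch contrasting Key and FlatKey (Filter and Tags are
// illustrative member names):
//
//	var body bytes.Buffer
//	encoder := NewEncoder(&body)
//	filter := encoder.Object().Key("Filter").Object()
//	filter.Key("Name").String("instance-type") // Filter.Name=instance-type
//	tags := encoder.Object().FlatKey("Tags").Array("member")
//	tags.Value().String("a") // Tags.1=a; the member name is omitted when flat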

@ -1,106 +0,0 @@
package query
import (
"math/big"
"net/url"
"github.com/aws/smithy-go/encoding/httpbinding"
)
// Value represents a Query Value type.
type Value struct {
// The query values to add the value to.
values url.Values
// The value's key, which will form the prefix for complex types.
key string
// Whether the value should be flattened or not if it's a flattenable type.
flat bool
queryValue httpbinding.QueryValue
}
func newValue(values url.Values, key string, flat bool) Value {
return Value{
values: values,
key: key,
flat: flat,
queryValue: httpbinding.NewQueryValue(values, key, false),
}
}
func newBaseValue(values url.Values) Value {
return Value{
values: values,
queryValue: httpbinding.NewQueryValue(nil, "", false),
}
}
// Array returns a new Array encoder.
func (qv Value) Array(locationName string) *Array {
return newArray(qv.values, qv.key, qv.flat, locationName)
}
// Object returns a new Object encoder.
func (qv Value) Object() *Object {
return newObject(qv.values, qv.key)
}
// Map returns a new Map encoder.
func (qv Value) Map(keyLocationName string, valueLocationName string) *Map {
return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName)
}
// Base64EncodeBytes encodes v as a base64 query string value.
// This is intended to enable compatibility with the JSON encoder.
func (qv Value) Base64EncodeBytes(v []byte) {
qv.queryValue.Blob(v)
}
// Boolean encodes v as a query string value
func (qv Value) Boolean(v bool) {
qv.queryValue.Boolean(v)
}
// String encodes v as a query string value
func (qv Value) String(v string) {
qv.queryValue.String(v)
}
// Byte encodes v as a query string value
func (qv Value) Byte(v int8) {
qv.queryValue.Byte(v)
}
// Short encodes v as a query string value
func (qv Value) Short(v int16) {
qv.queryValue.Short(v)
}
// Integer encodes v as a query string value
func (qv Value) Integer(v int32) {
qv.queryValue.Integer(v)
}
// Long encodes v as a query string value
func (qv Value) Long(v int64) {
qv.queryValue.Long(v)
}
// Float encodes v as a query string value
func (qv Value) Float(v float32) {
qv.queryValue.Float(v)
}
// Double encodes v as a query string value
func (qv Value) Double(v float64) {
qv.queryValue.Double(v)
}
// BigInteger encodes v as a query string value
func (qv Value) BigInteger(v *big.Int) {
qv.queryValue.BigInteger(v)
}
// BigDecimal encodes v as a query string value
func (qv Value) BigDecimal(v *big.Float) {
qv.queryValue.BigDecimal(v)
}

@ -1,85 +0,0 @@
package restjson
import (
"encoding/json"
"io"
"strings"
"github.com/aws/smithy-go"
)
// GetErrorInfo looks for the code, __type, and message members in the
// json body. These members are optional, and the function returns the
// value of each member that is present. This function is useful for
// identifying the error code and message in a REST JSON error response.
func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
var errInfo struct {
Code string
Type string `json:"__type"`
Message string
}
err = decoder.Decode(&errInfo)
if err != nil {
if err == io.EOF {
return errorType, message, nil
}
return errorType, message, err
}
// assign error type
if len(errInfo.Code) != 0 {
errorType = errInfo.Code
} else if len(errInfo.Type) != 0 {
errorType = errInfo.Type
}
// assign error message
if len(errInfo.Message) != 0 {
message = errInfo.Message
}
// sanitize error
if len(errorType) != 0 {
errorType = SanitizeErrorCode(errorType)
}
return errorType, message, nil
}
// SanitizeErrorCode sanitizes the errorCode string.
// The rule for sanitizing is: if a `:` character is present, take only the
// contents before the first `:` character in the value.
// If a `#` character is present, take only the contents after the
// first `#` character in the value.
func SanitizeErrorCode(errorCode string) string {
if strings.ContainsAny(errorCode, ":") {
errorCode = strings.SplitN(errorCode, ":", 2)[0]
}
if strings.ContainsAny(errorCode, "#") {
errorCode = strings.SplitN(errorCode, "#", 2)[1]
}
return errorCode
}
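// Worked examples of the rules above:
//
//	SanitizeErrorCode("InvalidSignatureException:Extra detail") // "InvalidSignatureException"
//	SanitizeErrorCode("com.amazonaws.service#AccessDenied")     // "AccessDenied"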
// GetSmithyGenericAPIError returns a smithy generic API error and an error interface.
// It takes a json decoder and an error code string as arguments. The function retrieves
// the error message and error code from the decoder body. If an errorCode of length
// greater than 0 is passed in as an argument, it is used instead.
func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
errorType, message, err := GetErrorInfo(decoder)
if err != nil {
return nil, err
}
if len(errorCode) == 0 {
errorCode = errorType
}
return &smithy.GenericAPIError{
Code: errorCode,
Message: message,
}, nil
}

@ -1,56 +0,0 @@
package xml
import (
"encoding/xml"
"fmt"
"io"
)
// ErrorComponents represents the error response fields
// that will be deserialized from an xml error response body
type ErrorComponents struct {
Code string
Message string
RequestID string
}
// GetErrorResponseComponents returns the error fields from an xml error response body
func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
if noErrorWrapping {
var errResponse noWrappedErrorResponse
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
}
var errResponse wrappedErrorResponse
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
}
// noWrappedErrorResponse represents the error response body with
// no internal <Error></Error> wrapping
type noWrappedErrorResponse struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
RequestID string `xml:"RequestId"`
}
// wrappedErrorResponse represents the error response body
// wrapped within <Error>...</Error>
type wrappedErrorResponse struct {
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
RequestID string `xml:"RequestId"`
}
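// A minimal sketch of deserializing a wrapped error body (assuming a
// strings.Reader source; the element values are illustrative):
//
//	body := strings.NewReader(`<ErrorResponse>` +
//		`<Error><Code>NoSuchKey</Code><Message>key not found</Message></Error>` +
//		`<RequestId>abc-123</RequestId></ErrorResponse>`)
//	ec, err := GetErrorResponseComponents(body, false)
//	// ec.Code == "NoSuchKey", ec.Message == "key not found", ec.RequestID == "abc-123"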

@ -1,96 +0,0 @@
package ratelimit
import (
"sync"
)
// TokenBucket provides a concurrency safe utility for adding and removing
// tokens from the available token bucket.
type TokenBucket struct {
remainingTokens uint
maxCapacity uint
minCapacity uint
mu sync.Mutex
}
// NewTokenBucket returns an initialized TokenBucket with the capacity
// specified.
func NewTokenBucket(i uint) *TokenBucket {
return &TokenBucket{
remainingTokens: i,
maxCapacity: i,
minCapacity: 1,
}
}
// Retrieve attempts to reduce the available tokens by the amount requested. If
// there are tokens available true will be returned along with the number of
// available tokens remaining. If amount requested is larger than the available
// capacity, false will be returned along with the available capacity. If the
// amount is less than the available capacity, the capacity will be reduced by
// that amount, and the remaining capacity and true will be returned.
func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
t.mu.Lock()
defer t.mu.Unlock()
if amount > t.remainingTokens {
return t.remainingTokens, false
}
t.remainingTokens -= amount
return t.remainingTokens, true
}
// Refund returns the amount of tokens back to the available token bucket, up
// to the initial capacity.
func (t *TokenBucket) Refund(amount uint) {
t.mu.Lock()
defer t.mu.Unlock()
// Capacity cannot exceed max capacity.
t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
}
// Capacity returns the maximum capacity of tokens that the bucket could
// contain.
func (t *TokenBucket) Capacity() uint {
t.mu.Lock()
defer t.mu.Unlock()
return t.maxCapacity
}
// Remaining returns the number of tokens remaining in the bucket.
func (t *TokenBucket) Remaining() uint {
t.mu.Lock()
defer t.mu.Unlock()
return t.remainingTokens
}
// Resize adjusts the size of the token bucket. Returns the capacity remaining.
func (t *TokenBucket) Resize(size uint) uint {
t.mu.Lock()
defer t.mu.Unlock()
t.maxCapacity = uintMax(size, t.minCapacity)
// Capacity needs to be capped at max capacity, if max size reduced.
t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
return t.remainingTokens
}
func uintMin(a, b uint) uint {
if a < b {
return a
}
return b
}
func uintMax(a, b uint) uint {
if a > b {
return a
}
return b
}
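// A minimal sketch of the retrieve/refund cycle:
//
//	bucket := NewTokenBucket(10)
//	if remaining, ok := bucket.Retrieve(3); ok {
//		fmt.Println(remaining) // 7
//	}
//	bucket.Refund(3) // back to 10; refunds never exceed the max capacity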

@ -1,87 +0,0 @@
package ratelimit
import (
"context"
"fmt"
)
type rateToken struct {
tokenCost uint
bucket *TokenBucket
}
func (t rateToken) release() error {
t.bucket.Refund(t.tokenCost)
return nil
}
// TokenRateLimit provides a Token Bucket RateLimiter implementation
// that limits the overall number of retry attempts that can be made across
// operation invocations.
type TokenRateLimit struct {
bucket *TokenBucket
}
// NewTokenRateLimit returns a TokenRateLimit with default values.
// Functional options can configure the retry rate limiter.
func NewTokenRateLimit(tokens uint) *TokenRateLimit {
return &TokenRateLimit{
bucket: NewTokenBucket(tokens),
}
}
func isTimeoutError(error) bool {
return false
}
type canceledError struct {
Err error
}
func (c canceledError) CanceledError() bool { return true }
func (c canceledError) Unwrap() error { return c.Err }
func (c canceledError) Error() string {
return fmt.Sprintf("canceled, %v", c.Err)
}
// GetToken may cause the available pool of retry quota to be
// decremented. Will return an error if the requested cost cannot be
// deducted from the available retry quota.
func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
select {
case <-ctx.Done():
return nil, canceledError{Err: ctx.Err()}
default:
}
if avail, ok := l.bucket.Retrieve(cost); !ok {
return nil, QuotaExceededError{Available: avail, Requested: cost}
}
return rateToken{
tokenCost: cost,
bucket: l.bucket,
}.release, nil
}
// AddTokens increments the token bucket by a fixed amount.
func (l *TokenRateLimit) AddTokens(v uint) error {
l.bucket.Refund(v)
return nil
}
// Remaining returns the number of remaining tokens in the bucket.
func (l *TokenRateLimit) Remaining() uint {
return l.bucket.Remaining()
}
// QuotaExceededError provides the SDK error when the retries for a given
// token bucket have been exhausted.
type QuotaExceededError struct {
Available uint
Requested uint
}
func (e QuotaExceededError) Error() string {
return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
e.Available, e.Requested)
}
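// A minimal sketch of acquiring and releasing retry quota (ctx is the
// caller's context; the cost of 5 is illustrative):
//
//	limiter := NewTokenRateLimit(500)
//	release, err := limiter.GetToken(ctx, 5)
//	if err != nil {
//		return err // e.g. QuotaExceededError when the bucket is exhausted
//	}
//	defer func() { _ = release() }() // refunds the cost back to the bucket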

@ -1,25 +0,0 @@
package aws
import (
"fmt"
)
// TODO: remove, replace with smithy.CanceledError
// RequestCanceledError is the error that will be returned by an API request
// that was canceled. Requests given a Context may return this error when
// canceled.
type RequestCanceledError struct {
Err error
}
// CanceledError returns true to satisfy interfaces checking for canceled errors.
func (*RequestCanceledError) CanceledError() bool { return true }
// Unwrap returns the underlying error, if there was one.
func (e *RequestCanceledError) Unwrap() error {
return e.Err
}
func (e *RequestCanceledError) Error() string {
return fmt.Sprintf("request canceled, %v", e.Err)
}

@ -1,156 +0,0 @@
package retry
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
const (
// DefaultRequestCost is the cost of a single request from the adaptive
// rate limited token bucket.
DefaultRequestCost uint = 1
)
// DefaultThrottles provides the set of errors considered throttle errors that
// are checked by default.
var DefaultThrottles = []IsErrorThrottle{
ThrottleErrorCode{
Codes: DefaultThrottleErrorCodes,
},
}
// AdaptiveModeOptions provides the functional options for configuring the
// adaptive retry mode, and delay behavior.
type AdaptiveModeOptions struct {
// If the adaptive token bucket is empty when an attempt is to be made,
// AdaptiveMode will sleep until a token is available. This can occur when
// attempts fail with throttle errors. Use this option to disable the sleep
// until a token is available, and return an error immediately.
FailOnNoAttemptTokens bool
// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
RequestCost uint
// Set of strategies to determine if the attempt failed due to a throttle
// error.
//
// It is safe to append to this list in NewAdaptiveMode's functional options.
Throttles []IsErrorThrottle
// Set of options for standard retry mode that AdaptiveMode is built on top
// of. AdaptiveMode may apply its own defaults to Standard retry mode that
// are different than the defaults of NewStandard. Use these options to
// override the default options.
StandardOptions []func(*StandardOptions)
}
// AdaptiveMode provides an experimental retry strategy that expands on the
// Standard retry strategy, adding client attempt rate limits. The attempt rate
// limit is initially unrestricted, but becomes restricted when the attempt
// fails with a throttle error. When restricted, AdaptiveMode may need to
// sleep before an attempt is made, if too many throttles have been received.
// AdaptiveMode's sleep can be canceled with context cancel. Set
// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep,
// to fail fast.
//
// Eventually the unrestricted attempt rate limit will be restored once attempts
// are no longer failing due to throttle errors.
type AdaptiveMode struct {
options AdaptiveModeOptions
throttles IsErrorThrottles
retryer aws.RetryerV2
rateLimit *adaptiveRateLimit
}
// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
o := AdaptiveModeOptions{
RequestCost: DefaultRequestCost,
Throttles: append([]IsErrorThrottle{}, DefaultThrottles...),
}
for _, fn := range optFns {
fn(&o)
}
return &AdaptiveMode{
options: o,
throttles: IsErrorThrottles(o.Throttles),
retryer: NewStandard(o.StandardOptions...),
rateLimit: newAdaptiveRateLimit(),
}
}
// IsErrorRetryable returns if the failed attempt is retryable. This check
// should determine if the error can be retried, or if the error is
// terminal.
func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
return a.retryer.IsErrorRetryable(err)
}
// MaxAttempts returns the maximum number of attempts that can be made for
// a request before failing. A value of 0 implies that the request should
// be retried until it succeeds if the errors are retryable.
func (a *AdaptiveMode) MaxAttempts() int {
return a.retryer.MaxAttempts()
}
// RetryDelay returns the delay that should be used before retrying the
// attempt. Will return an error if the delay could not be determined.
func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
time.Duration, error,
) {
return a.retryer.RetryDelay(attempt, opErr)
}
// GetRetryToken attempts to deduct the retry cost from the retry token pool.
// Returns the token release function, or an error.
func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
releaseToken func(error) error, err error,
) {
return a.retryer.GetRetryToken(ctx, opErr)
}
// GetInitialToken returns the initial attempt token that can increment the
// retry token pool if the attempt is successful.
//
// Deprecated: This method does not provide a way to block using Context,
// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
// present to implement Retryer interface.
func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
return nopRelease
}
// GetAttemptToken returns the attempt token that can be used to rate limit
// attempt calls. Will be used by the SDK's retry package's Attempt
// middleware to get an attempt token prior to making the attempt, and releasing
// the attempt token after the attempt has been made.
func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
for {
acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
if acquiredToken {
break
}
if a.options.FailOnNoAttemptTokens {
return nil, fmt.Errorf(
"unable to get attempt token, and FailOnNoAttemptTokens enables")
}
if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
}
}
return a.handleResponse, nil
}
func (a *AdaptiveMode) handleResponse(opErr error) error {
throttled := a.throttles.IsErrorThrottle(opErr).Bool()
a.rateLimit.Update(throttled)
return nil
}
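// A hedged configuration sketch, mirroring the standard retryer example in
// this package's doc comment (the s3 client and cfg are illustrative):
//
//	adaptive := NewAdaptiveMode(func(o *AdaptiveModeOptions) {
//		o.FailOnNoAttemptTokens = true // fail fast instead of sleeping for a token
//	})
//	s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.Retryer = adaptive
//	})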

@ -1,158 +0,0 @@
package retry
import (
"math"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
type adaptiveRateLimit struct {
tokenBucketEnabled bool
smooth float64
beta float64
scaleConstant float64
minFillRate float64
fillRate float64
calculatedRate float64
lastRefilled time.Time
measuredTxRate float64
lastTxRateBucket float64
requestCount int64
lastMaxRate float64
lastThrottleTime time.Time
timeWindow float64
tokenBucket *adaptiveTokenBucket
mu sync.Mutex
}
func newAdaptiveRateLimit() *adaptiveRateLimit {
now := sdk.NowTime()
return &adaptiveRateLimit{
smooth: 0.8,
beta: 0.7,
scaleConstant: 0.4,
minFillRate: 0.5,
lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
lastThrottleTime: now,
tokenBucket: newAdaptiveTokenBucket(0),
}
}
func (a *adaptiveRateLimit) Enable(v bool) {
a.mu.Lock()
defer a.mu.Unlock()
a.tokenBucketEnabled = v
}
func (a *adaptiveRateLimit) AcquireToken(amount uint) (
tokenAcquired bool, waitTryAgain time.Duration,
) {
a.mu.Lock()
defer a.mu.Unlock()
if !a.tokenBucketEnabled {
return true, 0
}
a.tokenBucketRefill()
available, ok := a.tokenBucket.Retrieve(float64(amount))
if !ok {
waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
return false, waitDur
}
return true, 0
}
func (a *adaptiveRateLimit) Update(throttled bool) {
a.mu.Lock()
defer a.mu.Unlock()
a.updateMeasuredRate()
if throttled {
rateToUse := a.measuredTxRate
if a.tokenBucketEnabled {
rateToUse = math.Min(a.measuredTxRate, a.fillRate)
}
a.lastMaxRate = rateToUse
a.calculateTimeWindow()
a.lastThrottleTime = sdk.NowTime()
a.calculatedRate = a.cubicThrottle(rateToUse)
a.tokenBucketEnabled = true
} else {
a.calculateTimeWindow()
a.calculatedRate = a.cubicSuccess(sdk.NowTime())
}
newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
a.tokenBucketUpdateRate(newRate)
}
func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
dt := secondsFloat64(t.Sub(a.lastThrottleTime))
return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
}
func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
return rateToUse * a.beta
}
func (a *adaptiveRateLimit) calculateTimeWindow() {
a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
}
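// Worked example: with the defaults beta=0.7 and scaleConstant=0.4, a
// lastMaxRate of 10 req/s gives timeWindow = ((10*0.3)/0.4)^(1/3) ≈ 1.96s,
// which is exactly when cubicSuccess climbs back up to lastMaxRate.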
func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) {
a.tokenBucketRefill()
a.fillRate = math.Max(newRPS, a.minFillRate)
a.tokenBucket.Resize(newRPS)
}
func (a *adaptiveRateLimit) updateMeasuredRate() {
now := sdk.NowTime()
timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2.
a.requestCount++
if timeBucket > a.lastTxRateBucket {
currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket)
a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth))
a.requestCount = 0
a.lastTxRateBucket = timeBucket
}
}
func (a *adaptiveRateLimit) tokenBucketRefill() {
now := sdk.NowTime()
if a.lastRefilled.IsZero() {
a.lastRefilled = now
return
}
fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate
a.tokenBucket.Refund(fillAmount)
a.lastRefilled = now
}
func float64Seconds(v float64) time.Duration {
return time.Duration(v * float64(time.Second))
}
func secondsFloat64(v time.Duration) float64 {
return float64(v) / float64(time.Second)
}
func timeFloat64Seconds(v time.Time) float64 {
return float64(v.UnixNano()) / float64(time.Second)
}

@ -1,83 +0,0 @@
package retry
import (
"math"
"sync"
)
// adaptiveTokenBucket provides a concurrency safe utility for adding and
// removing tokens from the available token bucket.
type adaptiveTokenBucket struct {
remainingTokens float64
maxCapacity float64
minCapacity float64
mu sync.Mutex
}
// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the
// capacity specified.
func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
return &adaptiveTokenBucket{
remainingTokens: i,
maxCapacity: i,
minCapacity: 1,
}
}
// Retrieve attempts to reduce the available tokens by the amount requested. If
// there are tokens available true will be returned along with the number of
// available tokens remaining. If amount requested is larger than the available
// capacity, false will be returned along with the available capacity. If the
// amount is less than the available capacity, the capacity will be reduced by
// that amount, and the remaining capacity and true will be returned.
func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
t.mu.Lock()
defer t.mu.Unlock()
if amount > t.remainingTokens {
return t.remainingTokens, false
}
t.remainingTokens -= amount
return t.remainingTokens, true
}
// Refund returns the amount of tokens back to the available token bucket, up
// to the initial capacity.
func (t *adaptiveTokenBucket) Refund(amount float64) {
t.mu.Lock()
defer t.mu.Unlock()
// Capacity cannot exceed max capacity.
t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
}
// Capacity returns the maximum capacity of tokens that the bucket could
// contain.
func (t *adaptiveTokenBucket) Capacity() float64 {
t.mu.Lock()
defer t.mu.Unlock()
return t.maxCapacity
}
// Remaining returns the number of tokens remaining in the bucket.
func (t *adaptiveTokenBucket) Remaining() float64 {
t.mu.Lock()
defer t.mu.Unlock()
return t.remainingTokens
}
// Resize adjusts the size of the token bucket. Returns the capacity remaining.
func (t *adaptiveTokenBucket) Resize(size float64) float64 {
t.mu.Lock()
defer t.mu.Unlock()
t.maxCapacity = math.Max(size, t.minCapacity)
// Capacity needs to be capped at max capacity, if max size reduced.
t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
return t.remainingTokens
}

@ -1,80 +0,0 @@
// Package retry provides interfaces and implementations for SDK request retry behavior.
//
// # Retryer Interface and Implementations
//
// This package defines the Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This package provides a single
// retry implementation: Standard.
//
// # Standard
//
// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
// and uses an additional delay policy to limit the time between a request's subsequent attempts.
//
// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
// a given error is retryable. By default this list of retryables includes the following:
// - Retrying errors that implement the RetryableError method, and return true.
// - Connection Errors
// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
// - Connection Reset Errors.
// - net.OpErr types that are dialing errors or are temporary.
// - HTTP Status Codes: 500, 502, 503, and 504.
// - API Error Codes
// - RequestTimeout, RequestTimeoutException
// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
// RequestThrottled, SlowDown, EC2ThrottledException
// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
// - TransactionInProgressException, PriorRequestNotComplete
//
// The standard retryer will not retry a request if the context associated with the request
// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context
// value.
//
// You can configure the standard retryer implementation to fit your application by constructing a standard retryer
// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
// and the retry delay policy.
//
// For example, to modify the default retry attempts for the standard retryer:
//
// // configure the custom retryer
// customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
// o.MaxAttempts = 5
// })
//
// // create a service client with the retryer
// s3.NewFromConfig(cfg, func(o *s3.Options) {
// o.Retryer = customRetry
// })
//
// # Utilities
//
// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
// way. These are:
//
// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
// in addition to those considered retryable by the provided retryer.
//
// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
// a retryer implementation.
//
// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
// request by wrapping a retryer implementation.
//
// The following package functions have been provided to easily satisfy different retry interfaces to further customize
// a given retryer's behavior:
//
// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
// you can use this method to easily create custom back off policies to be used with the
// standard retryer.
//
// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be retried.
//
// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be considered a timeout.
package retry
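As a sketch of combining the options and wrapper functions described in the doc comment above, using the public retry and aws packages (the error code string is a placeholder, not a real service code):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func newRetryer() aws.Retryer {
	// Start from the standard retryer with a custom attempt cap and max backoff.
	r := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 5
		o.MaxBackoff = 30 * time.Second
	})

	// Wrap it so an additional, hypothetical API error code is retryable.
	return retry.AddWithErrorCodes(r, "MyTransientErrorCode")
}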

@ -1,20 +0,0 @@
package retry
import "fmt"
// MaxAttemptsError provides the error when the maximum number of attempts has
// been exceeded.
type MaxAttemptsError struct {
Attempt int
Err error
}
func (e *MaxAttemptsError) Error() string {
return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
}
// Unwrap returns the nested error causing the max attempts error. Provides the
// implementation for errors.Is and errors.As to unwrap nested errors.
func (e *MaxAttemptsError) Unwrap() error {
return e.Err
}
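Because Unwrap is implemented, errors.As can pull a MaxAttemptsError out of a wrapped operation error; a minimal sketch (reportExhaustion is a hypothetical helper, and err would come from some SDK client call):

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

// reportExhaustion inspects an operation error for attempt exhaustion.
func reportExhaustion(err error) {
	var maxErr *retry.MaxAttemptsError
	if errors.As(err, &maxErr) {
		log.Printf("gave up after %d attempts: %v", maxErr.Attempt, maxErr.Err)
	}
}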

@ -1,49 +0,0 @@
package retry
import (
"math"
"time"
"github.com/aws/aws-sdk-go-v2/internal/rand"
"github.com/aws/aws-sdk-go-v2/internal/timeconv"
)
// ExponentialJitterBackoff provides backoff delays with jitter based on the
// number of attempts.
type ExponentialJitterBackoff struct {
maxBackoff time.Duration
// precomputed number of attempts needed to reach max backoff.
maxBackoffAttempts float64
randFloat64 func() (float64, error)
}
// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
// for the max backoff.
func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
return &ExponentialJitterBackoff{
maxBackoff: maxBackoff,
maxBackoffAttempts: math.Log2(
float64(maxBackoff) / float64(time.Second)),
randFloat64: rand.CryptoRandFloat64,
}
}
// BackoffDelay returns the duration to wait before the next attempt should be
// made. Returns an error if unable to get a duration.
func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
if attempt > int(j.maxBackoffAttempts) {
return j.maxBackoff, nil
}
b, err := j.randFloat64()
if err != nil {
return 0, err
}
// [0.0, 1.0) * 2 ^ attempts
ri := int64(1 << uint64(attempt))
delaySeconds := b * float64(ri)
return timeconv.FloatSecondsDur(delaySeconds), nil
}
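To make the delay formula concrete: with a 20s max backoff, maxBackoffAttempts is log2(20) ≈ 4.32, so attempts 1 through 4 draw rand[0,1) * 2^attempt seconds of jittered delay, and attempt 5 onward returns the 20s cap. A minimal sketch using the exported constructor:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	backoff := retry.NewExponentialJitterBackoff(20 * time.Second)

	for attempt := 1; attempt <= 5; attempt++ {
		d, err := backoff.BackoffDelay(attempt, nil)
		if err != nil {
			panic(err)
		}
		// Attempts 1-4 print rand[0,1) * 2^attempt seconds; attempt 5 prints the 20s cap.
		fmt.Printf("attempt %d: %v\n", attempt, d)
	}
}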

@ -1,52 +0,0 @@
package retry
import (
awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
)
// attemptResultsKey is a metadata accessor key to retrieve metadata
// for all request attempts.
type attemptResultsKey struct{}
// GetAttemptResults retrieves attempt results from middleware metadata.
func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
return m, ok
}
// AttemptResults represents a struct containing metadata returned by all request attempts.
type AttemptResults struct {
// Results is a slice of attempt results from all request attempts.
// Results are stored in the order the request attempts were made.
Results []AttemptResult
}
// AttemptResult represents the attempt result returned by a single request attempt.
type AttemptResult struct {
// Err is the error if received for the request attempt.
Err error
// Retryable denotes whether the request may be retried. This states whether an
// error is considered retryable.
Retryable bool
// Retried indicates if this request was retried.
Retried bool
// ResponseMetadata is any existing metadata passed via the response middlewares.
ResponseMetadata middleware.Metadata
}
// addAttemptResults adds attempt results to middleware metadata
func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
metadata.Set(attemptResultsKey{}, v)
}
// GetRawResponse returns the raw response recorded for the attempt result.
func (a AttemptResult) GetRawResponse() interface{} {
return awsmiddle.GetRawResponse(a.ResponseMetadata)
}
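Attempt results recorded by addAttemptResults can be read back from an operation output's ResultMetadata field; a hedged sketch assuming an S3 ListBuckets call and default credentials:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		panic(err)
	}

	client := s3.NewFromConfig(cfg)
	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		panic(err)
	}

	// ResultMetadata carries the middleware.Metadata that addAttemptResults wrote.
	if results, ok := retry.GetAttemptResults(out.ResultMetadata); ok {
		for i, r := range results.Results {
			fmt.Printf("attempt %d: retried=%v retryable=%v err=%v\n", i, r.Retried, r.Retryable, r.Err)
		}
	}
}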
