parent
eb007a94a9
commit
78bb94be48
@ -0,0 +1,14 @@
|
||||
Copyright (c) 2015 aliyun.com
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
|
||||
Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
@ -0,0 +1,345 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// headerSorter holds parallel key/value slices so request headers can be
// sorted by key while each value stays attached to its key. It implements
// sort.Interface (see Len/Less/Swap below) and backs signHeader.
type headerSorter struct {
	Keys []string
	Vals []string
}
|
||||
|
||||
// getAdditionalHeaderKeys get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// getAdditionalHeaderKeysV4 get exist key in http header
|
||||
func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
|
||||
var keysList []string
|
||||
keysMap := make(map[string]string)
|
||||
srcKeys := make(map[string]string)
|
||||
|
||||
for k := range req.Header {
|
||||
srcKeys[strings.ToLower(k)] = ""
|
||||
}
|
||||
|
||||
for _, v := range conn.config.AdditionalHeaders {
|
||||
if _, ok := srcKeys[strings.ToLower(v)]; ok {
|
||||
if !strings.EqualFold(v, HTTPHeaderContentMD5) && !strings.EqualFold(v, HTTPHeaderContentType) {
|
||||
keysMap[strings.ToLower(v)] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keysMap {
|
||||
keysList = append(keysList, k)
|
||||
}
|
||||
sort.Strings(keysList)
|
||||
return keysList, keysMap
|
||||
}
|
||||
|
||||
// signHeader computes the request signature for the configured auth version
// (V4, V2, or the V1 default) and stores it in the Authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
	akIf := conn.config.GetCredentials()
	authorizationStr := ""
	if conn.config.AuthVersion == AuthV4 {
		// V4 needs the request day (yyyymmdd) for the credential scope.
		strDay := ""
		strDate := req.Header.Get(HttpHeaderOssDate)
		if strDate == "" {
			// Fall back to the standard Date header (RFC1123 layout).
			strDate = req.Header.Get(HTTPHeaderDate)
			t, _ := time.Parse(http.TimeFormat, strDate)
			strDay = t.Format("20060102")
		} else {
			// x-oss-date uses the ISO8601 second-precision layout.
			t, _ := time.Parse(iso8601DateFormatSecond, strDate)
			strDay = t.Format("20060102")
		}

		signHeaderProduct := conn.config.GetSignProduct()
		signHeaderRegion := conn.config.GetSignRegion()

		// Additional signed headers change the Authorization layout.
		additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
		if len(additionalList) > 0 {
			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
			additionnalHeadersStr := strings.Join(additionalList, ";")
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionnalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		} else {
			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		}
	} else if conn.config.AuthVersion == AuthV2 {
		additionalList, _ := conn.getAdditionalHeaderKeys(req)
		if len(additionalList) > 0 {
			authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
			additionnalHeadersStr := strings.Join(additionalList, ";")
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		} else {
			authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		}
	} else {
		// V1: "OSS <AccessKeyID>:<signature>".
		authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
	}

	// Give the parameter "Authorization" value.
	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
|
||||
|
||||
// getSignedStr builds the V1 (HMAC-SHA1) or V2 (HMAC-SHA256) string-to-sign
// from the request and returns the base64-encoded signature.
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
	// Collect the "x-oss-" headers (and, for V2 only, any configured
	// additional headers present), keyed by lower-cased name.
	ossHeadersMap := make(map[string]string)
	additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
	for k, v := range req.Header {
		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
			ossHeadersMap[strings.ToLower(k)] = v[0]
		} else if conn.config.AuthVersion == AuthV2 {
			if _, ok := additionalMap[strings.ToLower(k)]; ok {
				ossHeadersMap[strings.ToLower(k)] = v[0]
			}
		}
	}
	hs := newHeaderSorter(ossHeadersMap)

	// Sort the ossHeadersMap by key in ascending order.
	hs.Sort()

	// Canonicalize the headers as "key:value\n" pairs.
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}

	// Remaining signature inputs.
	// When signing a URL, the Date header carries the expires value instead.
	date := req.Header.Get(HTTPHeaderDate)
	contentType := req.Header.Get(HTTPHeaderContentType)
	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)

	// Default is the V1 signature: HMAC-SHA1 over the canonical string.
	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))

	// V2 adds the additional-header names and switches to HMAC-SHA256.
	if conn.config.AuthVersion == AuthV2 {
		signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
		h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
	}

	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
	}

	io.WriteString(h, signStr)
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))

	return signedStr
}
|
||||
|
||||
// getSignedStrV4 computes the V4 (OSS4-HMAC-SHA256) signature for the
// request and returns it hex-encoded.
func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string) string {
	// Collect the "x-oss-" headers plus any configured additional headers
	// present on the request, keyed by lower-cased name, values trimmed.
	ossHeadersMap := make(map[string]string)
	additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
	for k, v := range req.Header {
		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
			ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
		} else {
			if _, ok := additionalMap[strings.ToLower(k)]; ok {
				ossHeadersMap[strings.ToLower(k)] = strings.Trim(v[0], " ")
			}
		}
	}

	// Required parameters: pick the signing timestamp from Date (RFC1123)
	// or, preferentially, x-oss-date (ISO8601).
	signDate := ""
	dateFormat := ""
	date := req.Header.Get(HTTPHeaderDate)
	if date != "" {
		signDate = date
		dateFormat = http.TimeFormat
	}

	ossDate := req.Header.Get(HttpHeaderOssDate)
	_, ok := ossHeadersMap[strings.ToLower(HttpHeaderOssDate)]
	if ossDate != "" {
		signDate = ossDate
		dateFormat = iso8601DateFormatSecond
		// Make sure x-oss-date itself is among the signed headers.
		if !ok {
			ossHeadersMap[strings.ToLower(HttpHeaderOssDate)] = strings.Trim(ossDate, " ")
		}
	}

	// Content-Type and Content-MD5 are folded into the signed headers too.
	contentType := req.Header.Get(HTTPHeaderContentType)
	_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentType)]
	if contentType != "" && !ok {
		ossHeadersMap[strings.ToLower(HTTPHeaderContentType)] = strings.Trim(contentType, " ")
	}

	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
	_, ok = ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)]
	if contentMd5 != "" && !ok {
		ossHeadersMap[strings.ToLower(HTTPHeaderContentMD5)] = strings.Trim(contentMd5, " ")
	}

	hs := newHeaderSorter(ossHeadersMap)

	// Sort the ossHeadersMap by key in ascending order.
	hs.Sort()

	// Canonicalize the headers as "key:value\n" pairs.
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}

	signStr := ""

	// v4 signature: the payload hash comes from the x-oss-content-sha256 header.
	hashedPayload := req.Header.Get(HttpHeaderOssContentSha256)

	// Split the canonicalized resource into path and sub-resource at the
	// last '?'.
	resource := canonicalizedResource
	subResource := ""
	subPos := strings.LastIndex(canonicalizedResource, "?")
	if subPos != -1 {
		subResource = canonicalizedResource[subPos+1:]
		resource = canonicalizedResource[0:subPos]
	}

	// Build and hash the canonical request.
	canonicalReuqest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
	rh := sha256.New()
	io.WriteString(rh, canonicalReuqest)
	hashedRequest := hex.EncodeToString(rh.Sum(nil))

	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(canonicalReuqest))
	}

	// Scope day, e.g. 20210914.
	t, _ := time.Parse(dateFormat, signDate)
	strDay := t.Format("20060102")

	signedStrV4Product := conn.config.GetSignProduct()
	signedStrV4Region := conn.config.GetSignRegion()

	// String-to-sign: algorithm, timestamp, scope, hashed canonical request.
	signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
	}

	// Derive the signing key: an HMAC-SHA256 chain over day, region, product
	// and the literal "aliyun_v4_request", seeded with "aliyun_v4"+secret.
	h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
	io.WriteString(h1, strDay)
	h1Key := h1.Sum(nil)

	h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
	io.WriteString(h2, signedStrV4Region)
	h2Key := h2.Sum(nil)

	h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
	io.WriteString(h3, signedStrV4Product)
	h3Key := h3.Sum(nil)

	h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
	io.WriteString(h4, "aliyun_v4_request")
	h4Key := h4.Sum(nil)

	// Final signature: HMAC-SHA256 of the string-to-sign, hex-encoded.
	h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
	io.WriteString(h, signStr)
	return fmt.Sprintf("%x", h.Sum(nil))
}
|
||||
|
||||
func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
|
||||
if params[HTTPParamAccessKeyID] == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
|
||||
canonParamsKeys := []string{}
|
||||
for key := range params {
|
||||
if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
|
||||
canonParamsKeys = append(canonParamsKeys, key)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(canonParamsKeys)
|
||||
canonParamsStr := ""
|
||||
for _, key := range canonParamsKeys {
|
||||
canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
|
||||
}
|
||||
|
||||
expireStr := strconv.FormatInt(expiration, 10)
|
||||
signStr := expireStr + "\n" + canonParamsStr + canonResource
|
||||
|
||||
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
|
||||
io.WriteString(h, signStr)
|
||||
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
return signedStr
|
||||
}
|
||||
|
||||
// newHeaderSorter is an additional function for function SignHeader.
|
||||
func newHeaderSorter(m map[string]string) *headerSorter {
|
||||
hs := &headerSorter{
|
||||
Keys: make([]string, 0, len(m)),
|
||||
Vals: make([]string, 0, len(m)),
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
hs.Keys = append(hs.Keys, k)
|
||||
hs.Vals = append(hs.Vals, v)
|
||||
}
|
||||
return hs
|
||||
}
|
||||
|
||||
// Sort orders the entries by key ascending, keeping Keys and Vals aligned;
// an additional helper for signHeader.
func (hs *headerSorter) Sort() {
	sort.Sort(hs)
}
|
||||
|
||||
// Len reports the number of stored header entries (sort.Interface).
func (hs *headerSorter) Len() int {
	return len(hs.Vals)
}
|
||||
|
||||
// Less orders entries by byte-wise lexicographic comparison of their keys
// (sort.Interface).
// NOTE(review): equivalent to hs.Keys[i] < hs.Keys[j]; kept as bytes.Compare
// because it is the only use of the "bytes" import in this file.
func (hs *headerSorter) Less(i, j int) bool {
	return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}
|
||||
|
||||
// Swap exchanges entries i and j, keeping Keys and Vals aligned
// (sort.Interface).
func (hs *headerSorter) Swap(i, j int) {
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,229 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Define the level of the output log.
const (
	LogOff = iota // logging disabled
	Error
	Warn
	Info
	Debug
)

// LogTag holds the prefix tag for each level. Indexed with level-1 by
// Config.WriteLog (LogOff has no tag).
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
|
||||
|
||||
// HTTPTimeout defines the HTTP timeout knobs for the transport. The exact
// semantics of each field are applied where the transport is built
// (newTransport, not visible here).
type HTTPTimeout struct {
	ConnectTimeout   time.Duration
	ReadWriteTimeout time.Duration
	HeaderTimeout    time.Duration
	LongTimeout      time.Duration
	IdleConnTimeout  time.Duration
}
|
||||
|
||||
// HTTPMaxConns defines max idle connections, max idle connections per host,
// and max connections per host for the HTTP transport.
type HTTPMaxConns struct {
	MaxIdleConns        int
	MaxIdleConnsPerHost int
	MaxConnsPerHost     int
}
|
||||
|
||||
// Credentials is the interface for obtaining the AccessKeyID,
// AccessKeySecret and SecurityToken used for signing.
type Credentials interface {
	GetAccessKeyID() string
	GetAccessKeySecret() string
	GetSecurityToken() string
}
|
||||
|
||||
// CredentialsProvider is the interface for obtaining a Credentials
// implementation.
type CredentialsProvider interface {
	GetCredentials() Credentials
}
|
||||
|
||||
// defaultCredentials serves the static credentials stored on a Config.
type defaultCredentials struct {
	config *Config
}

// GetAccessKeyID returns the configured AccessKeyID.
func (defCre *defaultCredentials) GetAccessKeyID() string {
	return defCre.config.AccessKeyID
}

// GetAccessKeySecret returns the configured AccessKeySecret.
func (defCre *defaultCredentials) GetAccessKeySecret() string {
	return defCre.config.AccessKeySecret
}

// GetSecurityToken returns the configured STS SecurityToken (may be empty).
func (defCre *defaultCredentials) GetSecurityToken() string {
	return defCre.config.SecurityToken
}
|
||||
|
||||
// defaultCredentialsProvider hands out defaultCredentials backed by a Config.
type defaultCredentialsProvider struct {
	config *Config
}

// GetCredentials returns a Credentials view over the provider's Config.
func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
	return &defaultCredentials{config: defBuild.config}
}
|
||||
|
||||
// Config defines oss configuration.
type Config struct {
	Endpoint            string              // OSS endpoint
	AccessKeyID         string              // AccessId
	AccessKeySecret     string              // AccessKey
	RetryTimes          uint                // Retry count by default it's 5.
	UserAgent           string              // SDK name/version/system information
	IsDebug             bool                // Enable debug mode. Default is false.
	Timeout             uint                // Timeout in seconds. By default it's 60.
	SecurityToken       string              // STS Token
	IsCname             bool                // If cname is in the endpoint.
	HTTPTimeout         HTTPTimeout         // HTTP timeout
	HTTPMaxConns        HTTPMaxConns        // Http max connections
	IsUseProxy          bool                // Flag of using proxy.
	ProxyHost           string              // Flag of using proxy host.
	IsAuthProxy         bool                // Flag of needing authentication.
	ProxyUser           string              // Proxy user
	ProxyPassword       string              // Proxy password
	IsEnableMD5         bool                // Flag of enabling MD5 for upload.
	MD5Threshold        int64               // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
	IsEnableCRC         bool                // Flag of enabling CRC for upload.
	LogLevel            int                 // Log level
	Logger              *log.Logger         // For write log
	UploadLimitSpeed    int                 // Upload limit speed:KB/s, 0 is unlimited
	UploadLimiter       *OssLimiter         // Bandwidth limit reader for upload
	DownloadLimitSpeed  int                 // Download limit speed:KB/s, 0 is unlimited
	DownloadLimiter     *OssLimiter         // Bandwidth limit reader for download
	CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
	LocalAddr           net.Addr            // local client host info
	UserSetUa           bool                // UserAgent is set by user or not
	AuthVersion         AuthVersionType     // v1 or v2, v4 signature,default is v1
	AdditionalHeaders   []string            // special http headers needed to be sign
	RedirectEnabled     bool                // only effective from go1.7 onward, enable http redirect or not
	InsecureSkipVerify  bool                // for https, Whether to skip verifying the server certificate file
	Region              string              // such as cn-hangzhou
	CloudBoxId          string              // cloud box id; when non-empty, signing uses product "oss-cloudbox" and this id as the sign region (see GetSignProduct/GetSignRegion)
	Product             string              // oss or oss-cloudbox, default is oss
}
|
||||
|
||||
// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
|
||||
if uploadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
|
||||
} else if uploadSpeed == 0 {
|
||||
config.UploadLimitSpeed = 0
|
||||
config.UploadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
|
||||
if err == nil {
|
||||
config.UploadLimitSpeed = uploadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// LimitDownLoadSpeed downloadSpeed:KB/s, 0 is unlimited,default is 0
|
||||
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
|
||||
if downloadSpeed < 0 {
|
||||
return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
|
||||
} else if downloadSpeed == 0 {
|
||||
config.DownloadLimitSpeed = 0
|
||||
config.DownloadLimiter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
|
||||
if err == nil {
|
||||
config.DownloadLimitSpeed = downloadSpeed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteLog output log function
|
||||
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
|
||||
if config.LogLevel < LogLevel || config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var logBuffer bytes.Buffer
|
||||
logBuffer.WriteString(LogTag[LogLevel-1])
|
||||
logBuffer.WriteString(fmt.Sprintf(format, a...))
|
||||
config.Logger.Printf("%s", logBuffer.String())
|
||||
}
|
||||
|
||||
// GetCredentials returns the current Credentials from the configured
// CredentialsProvider.
func (config *Config) GetCredentials() Credentials {
	return config.CredentialsProvider.GetCredentials()
}
|
||||
|
||||
// for get Sign Product
|
||||
func (config *Config) GetSignProduct() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return "oss-cloudbox"
|
||||
}
|
||||
return "oss"
|
||||
}
|
||||
|
||||
// for get Sign Region
|
||||
func (config *Config) GetSignRegion() string {
|
||||
if config.CloudBoxId != "" {
|
||||
return config.CloudBoxId
|
||||
}
|
||||
return config.Region
|
||||
}
|
||||
|
||||
// getDefaultOssConfig gets the default configuration: V1 auth, no proxy,
// CRC enabled, logging off, and a credentials provider that reads the
// static keys from this same Config.
func getDefaultOssConfig() *Config {
	config := Config{}

	config.Endpoint = ""
	config.AccessKeyID = ""
	config.AccessKeySecret = ""
	config.RetryTimes = 5
	config.IsDebug = false
	config.UserAgent = userAgent()
	config.Timeout = 60 // Seconds
	config.SecurityToken = ""
	config.IsCname = false

	config.HTTPTimeout.ConnectTimeout = time.Second * 30   // 30s
	config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
	config.HTTPTimeout.HeaderTimeout = time.Second * 60    // 60s
	config.HTTPTimeout.LongTimeout = time.Second * 300     // 300s
	config.HTTPTimeout.IdleConnTimeout = time.Second * 50  // 50s
	config.HTTPMaxConns.MaxIdleConns = 100
	config.HTTPMaxConns.MaxIdleConnsPerHost = 100

	config.IsUseProxy = false
	config.ProxyHost = ""
	config.IsAuthProxy = false
	config.ProxyUser = ""
	config.ProxyPassword = ""

	config.MD5Threshold = 16 * 1024 * 1024 // 16MB
	config.IsEnableMD5 = false
	config.IsEnableCRC = true

	// Logging is off by default, but a stdout logger is pre-wired so raising
	// LogLevel alone is enough to get output.
	config.LogLevel = LogOff
	config.Logger = log.New(os.Stdout, "", log.LstdFlags)

	// Default credentials read the static keys from this same Config value.
	provider := &defaultCredentialsProvider{config: &config}
	config.CredentialsProvider = provider

	config.AuthVersion = AuthV1
	config.RedirectEnabled = true
	config.InsecureSkipVerify = false

	config.Product = "oss"

	return &config
}
|
@ -0,0 +1,916 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Conn defines an OSS connection: the configuration, the URL builder, and
// the HTTP client shared by all requests.
type Conn struct {
	config *Config
	url    *urlMaker
	client *http.Client
}
|
||||
|
||||
// signKeyList enumerates the query parameters that participate in the V1
// canonicalized sub-resource (see getSubResource / isParamSign).
// Fix: the list previously contained "comp" twice; the duplicate entry is
// removed (membership semantics are unchanged).
var signKeyList = []string{"acl", "uploads", "location", "cors",
	"logging", "website", "referer", "lifecycle",
	"delete", "append", "tagging", "objectMeta",
	"uploadId", "partNumber", "security-token",
	"position", "img", "style", "styleName",
	"replication", "replicationProgress",
	"replicationLocation", "cname", "bucketInfo",
	"comp", "qos", "live", "status", "vod",
	"startTime", "endTime", "symlink",
	"x-oss-process", "response-content-type", "x-oss-traffic-limit",
	"response-content-language", "response-expires",
	"response-cache-control", "response-content-disposition",
	"response-content-encoding", "udf", "udfName", "udfImage",
	"udfId", "udfImageDesc", "udfApplication",
	"udfApplicationLog", "restore", "callback", "callback-var", "qosInfo",
	"policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment",
	"x-oss-request-payer", "sequential",
	"inventory", "inventoryId", "continuation-token", "asyncFetch",
	"worm", "wormId", "wormExtend", "withHashContext",
	"x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256",
	"x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration",
	"regionList", "cloudboxes",
}
|
||||
|
||||
// init initializes Conn with the given config and URL maker. When client is
// nil, a new http.Client is built with a fresh transport (proxy and redirect
// policy applied); otherwise the supplied client is used as-is.
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
	if client == nil {
		// New transport
		transport := newTransport(conn, config)

		// Optional forward proxy, with optional basic-auth credentials.
		// NOTE(review): this reads conn.config before it is assigned below —
		// relies on the caller having pre-set conn.config (or it matching
		// the config argument); confirm at the call sites.
		if conn.config.IsUseProxy {
			proxyURL, err := url.Parse(config.ProxyHost)
			if err != nil {
				return err
			}
			if config.IsAuthProxy {
				if config.ProxyPassword != "" {
					proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword)
				} else {
					proxyURL.User = url.User(config.ProxyUser)
				}
			}
			transport.Proxy = http.ProxyURL(proxyURL)
		}
		client = &http.Client{Transport: transport}
		if !config.RedirectEnabled {
			disableHTTPRedirect(client)
		}
	}

	conn.config = config
	conn.url = urlMaker
	conn.client = client

	return nil
}
|
||||
|
||||
// Do sends the request and returns the response. It builds the query string
// and the canonicalized resource (escaped differently for V4 than for
// V1/V2), then delegates to doRequest.
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	urlParams := conn.getURLParams(params)
	subResource := conn.getSubResource(params)
	uri := conn.url.getURL(bucketName, objectName, urlParams)

	// V4 uses its own resource escaping rules.
	resource := ""
	if conn.config.AuthVersion != AuthV4 {
		resource = conn.getResource(bucketName, objectName, subResource)
	} else {
		resource = conn.getResourceV4(bucketName, objectName, subResource)
	}

	return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}
|
||||
|
||||
// DoURL sends the request with a pre-signed URL and returns the response.
// No signing happens here: the signature is already embedded in signedURL.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	// Get URI from signedURL
	uri, err := url.ParseRequestURI(signedURL)
	if err != nil {
		return nil, err
	}

	m := strings.ToUpper(string(method))
	req := &http.Request{
		Method:     m,
		URL:        uri,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       uri.Host,
	}

	// Attach the body; handleBody may return a temp file (fd), which is
	// closed and removed when this request finishes.
	tracker := &readerTracker{completedBytes: 0}
	fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
	if fd != nil {
		defer func() {
			fd.Close()
			os.Remove(fd.Name())
		}()
	}

	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}

	req.Header.Set(HTTPHeaderHost, req.Host)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

	// Caller-supplied headers override the defaults set above.
	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}

	// Transfer started
	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
	publishProgress(listener, event)

	if conn.config.LogLevel >= Debug {
		conn.LoggerHTTPReq(req)
	}

	resp, err := conn.client.Do(req)
	if err != nil {
		// Transfer failed
		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
		publishProgress(listener, event)
		conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
		return nil, err
	}

	if conn.config.LogLevel >= Debug {
		// print out http resp
		conn.LoggerHTTPResp(req, resp)
	}

	// Transfer completed
	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
	publishProgress(listener, event)

	return conn.handleResponse(resp, crc)
}
|
||||
|
||||
func (conn Conn) getURLParams(params map[string]interface{}) string {
|
||||
// Sort
|
||||
keys := make([]string, 0, len(params))
|
||||
for k := range params {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Serialize
|
||||
var buf bytes.Buffer
|
||||
for _, k := range keys {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(url.QueryEscape(k))
|
||||
if params[k] != nil && params[k].(string) != "" {
|
||||
buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// getSubResource builds the sub-resource part ("a=b&c" style) of the
// canonicalized resource. Under V2/V4 every parameter is included with
// query-escaped keys/values; under V1 only keys in signKeyList participate,
// unescaped.
func (conn Conn) getSubResource(params map[string]interface{}) string {
	// Sort
	keys := make([]string, 0, len(params))
	signParams := make(map[string]string)
	for k := range params {
		if conn.config.AuthVersion == AuthV2 || conn.config.AuthVersion == AuthV4 {
			encodedKey := url.QueryEscape(k)
			keys = append(keys, encodedKey)
			// NOTE(review): a non-nil, non-string value panics on this type
			// assertion; callers appear to pass strings only — confirm.
			if params[k] != nil && params[k] != "" {
				signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)
			}
		} else if conn.isParamSign(k) {
			keys = append(keys, k)
			if params[k] != nil {
				signParams[k] = params[k].(string)
			}
		}
	}
	sort.Strings(keys)

	// Serialize as key[=value] joined with '&'.
	var buf bytes.Buffer
	for _, k := range keys {
		if buf.Len() > 0 {
			buf.WriteByte('&')
		}
		buf.WriteString(k)
		if _, ok := signParams[k]; ok {
			if signParams[k] != "" {
				buf.WriteString("=" + signParams[k])
			}
		}
	}
	return buf.String()
}
|
||||
|
||||
func (conn Conn) isParamSign(paramKey string) bool {
|
||||
for _, k := range signKeyList {
|
||||
if paramKey == k {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getResource gets the canonicalized resource for V1/V2 signing. V2
// query-escapes the path pieces; V1 uses them verbatim.
func (conn Conn) getResource(bucketName, objectName, subResource string) string {
	if subResource != "" {
		subResource = "?" + subResource
	}
	if bucketName == "" {
		if conn.config.AuthVersion == AuthV2 {
			return url.QueryEscape("/") + subResource
		}
		// bucketName is empty here, so this is just "/" + subResource.
		return fmt.Sprintf("/%s%s", bucketName, subResource)
	}
	if conn.config.AuthVersion == AuthV2 {
		return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource
	}
	return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}
|
||||
|
||||
// getResourceV4 gets the canonicalized resource for V4 signing. The object
// name is query-escaped, except that '+' becomes "%20" and '/' separators
// are restored after escaping.
func (conn Conn) getResourceV4(bucketName, objectName, subResource string) string {
	if subResource != "" {
		subResource = "?" + subResource
	}

	if bucketName == "" {
		return fmt.Sprintf("/%s", subResource)
	}

	if objectName != "" {
		objectName = url.QueryEscape(objectName)
		objectName = strings.Replace(objectName, "+", "%20", -1)
		objectName = strings.Replace(objectName, "%2F", "/", -1)
		return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
	}
	// No object: "/<bucket>/" followed by the optional sub-resource.
	return fmt.Sprintf("/%s/%s", bucketName, subResource)
}
|
||||
|
||||
// doRequest builds, signs and sends one HTTP request to OSS.
// It wires the body through handleBody (which may spool it to a temp
// file for MD5 and wrap it for CRC/progress tracking), sets the
// standard and auth-related headers, signs the request, publishes
// progress events to listener, and hands the raw *http.Response to
// handleResponse. Returns the wrapped Response or the transport error.
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	method = strings.ToUpper(method)
	req := &http.Request{
		Method:     method,
		URL:        uri,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       uri.Host,
	}

	// handleBody may return a temp file (fd) that backs the body and a
	// CRC hash fed by the body reader; the temp file is cleaned up once
	// the request finishes.
	tracker := &readerTracker{completedBytes: 0}
	fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
	if fd != nil {
		defer func() {
			fd.Close()
			os.Remove(fd.Name())
		}()
	}

	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}

	stNow := time.Now().UTC()
	req.Header.Set(HTTPHeaderDate, stNow.Format(http.TimeFormat))
	req.Header.Set(HTTPHeaderHost, req.Host)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

	// v4 signing requires an explicit content-sha256 header; the SDK
	// signs with the UNSIGNED-PAYLOAD placeholder.
	if conn.config.AuthVersion == AuthV4 {
		req.Header.Set(HttpHeaderOssContentSha256, DefaultContentSha256)
	}

	akIf := conn.config.GetCredentials()
	if akIf.GetSecurityToken() != "" {
		req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken())
	}

	// Caller-supplied headers are set last so they can override defaults
	// (except the signature, which is computed below from the final set).
	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}

	conn.signHeader(req, canonicalizedResource)

	// Transfer started
	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
	publishProgress(listener, event)

	if conn.config.LogLevel >= Debug {
		conn.LoggerHTTPReq(req)
	}

	resp, err := conn.client.Do(req)

	if err != nil {
		// Transfer failed
		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
		publishProgress(listener, event)
		conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
		return nil, err
	}

	if conn.config.LogLevel >= Debug {
		//print out http resp
		conn.LoggerHTTPResp(req, resp)
	}

	// Transfer completed
	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
	publishProgress(listener, event)

	return conn.handleResponse(resp, crc)
}
|
||||
|
||||
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
|
||||
akIf := conn.config.GetCredentials()
|
||||
if akIf.GetSecurityToken() != "" {
|
||||
params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
|
||||
}
|
||||
|
||||
m := strings.ToUpper(string(method))
|
||||
req := &http.Request{
|
||||
Method: m,
|
||||
Header: make(http.Header),
|
||||
}
|
||||
|
||||
if conn.config.IsAuthProxy {
|
||||
auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
|
||||
basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
|
||||
req.Header.Set("Proxy-Authorization", basic)
|
||||
}
|
||||
|
||||
req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
|
||||
req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
|
||||
|
||||
if headers != nil {
|
||||
for k, v := range headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
if conn.config.AuthVersion == AuthV2 {
|
||||
params[HTTPParamSignatureVersion] = "OSS2"
|
||||
params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10)
|
||||
params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID
|
||||
additionalList, _ := conn.getAdditionalHeaderKeys(req)
|
||||
if len(additionalList) > 0 {
|
||||
params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";")
|
||||
}
|
||||
}
|
||||
|
||||
subResource := conn.getSubResource(params)
|
||||
canonicalizedResource := conn.getResource(bucketName, objectName, subResource)
|
||||
signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
|
||||
|
||||
if conn.config.AuthVersion == AuthV1 {
|
||||
params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
|
||||
params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
|
||||
params[HTTPParamSignature] = signedStr
|
||||
} else if conn.config.AuthVersion == AuthV2 {
|
||||
params[HTTPParamSignatureV2] = signedStr
|
||||
}
|
||||
urlParams := conn.getURLParams(params)
|
||||
return conn.url.getSignURL(bucketName, objectName, urlParams)
|
||||
}
|
||||
|
||||
func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string {
|
||||
params := map[string]interface{}{}
|
||||
if playlistName != "" {
|
||||
params[HTTPParamPlaylistName] = playlistName
|
||||
}
|
||||
expireStr := strconv.FormatInt(expiration, 10)
|
||||
params[HTTPParamExpires] = expireStr
|
||||
|
||||
akIf := conn.config.GetCredentials()
|
||||
if akIf.GetAccessKeyID() != "" {
|
||||
params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
|
||||
if akIf.GetSecurityToken() != "" {
|
||||
params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
|
||||
}
|
||||
signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
|
||||
params[HTTPParamSignature] = signedStr
|
||||
}
|
||||
|
||||
urlParams := conn.getURLParams(params)
|
||||
return conn.url.getSignRtmpURL(bucketName, channelName, urlParams)
|
||||
}
|
||||
|
||||
// handleBody prepares the request body: it fixes Content-Length,
// optionally computes Content-MD5 (which may spool the body to a temp
// file), optionally tees the body through a CRC64 hash with progress
// tracking, and finally installs the (possibly rate-limited) reader as
// req.Body. It returns the temp file (nil if none; caller must close
// and remove it) and the CRC hash (nil if CRC is disabled).
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
	listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
	var file *os.File
	var crc hash.Hash64
	reader := body
	// Best-effort length detection; on failure req.ContentLength keeps
	// its zero value and the header below reflects that.
	readerLen, err := GetReaderLen(reader)
	if err == nil {
		req.ContentLength = readerLen
	}
	req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))

	// MD5: calcMD5 may replace `reader` with a replayable reader (memory
	// buffer or temp file) since the original body was consumed to hash it.
	if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
		md5 := ""
		reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
		req.Header.Set(HTTPHeaderContentMD5, md5)
	}

	// CRC: tee all bytes read from `reader` into the CRC64 hash and the
	// progress tracker.
	if reader != nil && conn.config.IsEnableCRC {
		crc = NewCRC(CrcTable(), initCRC)
		reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
	}

	// HTTP body: http.Request wants a ReadCloser; wrap plain readers.
	rc, ok := reader.(io.ReadCloser)
	if !ok && reader != nil {
		rc = ioutil.NopCloser(reader)
	}

	if conn.isUploadLimitReq(req) {
		limitReader := &LimitSpeedReader{
			reader:     rc,
			ossLimiter: conn.config.UploadLimiter,
		}
		req.Body = limitReader
	} else {
		req.Body = rc
	}
	return file, crc
}
|
||||
|
||||
// isUploadLimitReq: judge limit upload speed or not
|
||||
func (conn Conn) isUploadLimitReq(req *http.Request) bool {
|
||||
if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" {
|
||||
if req.ContentLength > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func tryGetFileSize(f *os.File) int64 {
|
||||
fInfo, _ := f.Stat()
|
||||
return fInfo.Size()
|
||||
}
|
||||
|
||||
// handleResponse handles response
|
||||
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
|
||||
var cliCRC uint64
|
||||
var srvCRC uint64
|
||||
|
||||
statusCode := resp.StatusCode
|
||||
if statusCode/100 != 2 {
|
||||
if statusCode >= 400 && statusCode <= 505 {
|
||||
// 4xx and 5xx indicate that the operation has error occurred
|
||||
var respBody []byte
|
||||
respBody, err := readResponseBody(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(respBody) == 0 {
|
||||
err = ServiceError{
|
||||
StatusCode: statusCode,
|
||||
RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
|
||||
}
|
||||
} else {
|
||||
// Response contains storage service error object, unmarshal
|
||||
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
|
||||
resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
if errIn != nil { // error unmarshaling the error response
|
||||
err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
} else {
|
||||
err = srvErr
|
||||
}
|
||||
}
|
||||
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Headers: resp.Header,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
|
||||
}, err
|
||||
} else if statusCode >= 300 && statusCode <= 307 {
|
||||
// OSS use 3xx, but response has no body
|
||||
err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Headers: resp.Header,
|
||||
Body: resp.Body,
|
||||
}, err
|
||||
} else {
|
||||
// (0,300) [308,400) [506,)
|
||||
// Other extended http StatusCode
|
||||
var respBody []byte
|
||||
respBody, err := readResponseBody(resp)
|
||||
if err != nil {
|
||||
return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
|
||||
}
|
||||
|
||||
if len(respBody) == 0 {
|
||||
err = ServiceError{
|
||||
StatusCode: statusCode,
|
||||
RequestID: resp.Header.Get(HTTPHeaderOssRequestID),
|
||||
}
|
||||
} else {
|
||||
// Response contains storage service error object, unmarshal
|
||||
srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
|
||||
resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
if errIn != nil { // error unmarshaling the error response
|
||||
err = fmt.Errorf("unkown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
|
||||
} else {
|
||||
err = srvErr
|
||||
}
|
||||
}
|
||||
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Headers: resp.Header,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
|
||||
}, err
|
||||
}
|
||||
} else {
|
||||
if conn.config.IsEnableCRC && crc != nil {
|
||||
cliCRC = crc.Sum64()
|
||||
}
|
||||
srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
|
||||
|
||||
realBody := resp.Body
|
||||
if conn.isDownloadLimitResponse(resp) {
|
||||
limitReader := &LimitSpeedReader{
|
||||
reader: realBody,
|
||||
ossLimiter: conn.config.DownloadLimiter,
|
||||
}
|
||||
realBody = limitReader
|
||||
}
|
||||
|
||||
// 2xx, successful
|
||||
return &Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Headers: resp.Header,
|
||||
Body: realBody,
|
||||
ClientCRC: cliCRC,
|
||||
ServerCRC: srvCRC,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// isUploadLimitReq: judge limit upload speed or not
|
||||
func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
|
||||
if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.EqualFold(resp.Request.Method, "GET") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// LoggerHTTPReq Print the header information of the http request
|
||||
func (conn Conn) LoggerHTTPReq(req *http.Request) {
|
||||
var logBuffer bytes.Buffer
|
||||
logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
|
||||
logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
|
||||
logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path))
|
||||
logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery))
|
||||
logBuffer.WriteString(fmt.Sprintf("Header info:"))
|
||||
|
||||
for k, v := range req.Header {
|
||||
var valueBuffer bytes.Buffer
|
||||
for j := 0; j < len(v); j++ {
|
||||
if j > 0 {
|
||||
valueBuffer.WriteString(" ")
|
||||
}
|
||||
valueBuffer.WriteString(v[j])
|
||||
}
|
||||
logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
|
||||
}
|
||||
conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
|
||||
}
|
||||
|
||||
// LoggerHTTPResp Print Response to http request
|
||||
func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
|
||||
var logBuffer bytes.Buffer
|
||||
logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
|
||||
logBuffer.WriteString(fmt.Sprintf("Header info:"))
|
||||
for k, v := range resp.Header {
|
||||
var valueBuffer bytes.Buffer
|
||||
for j := 0; j < len(v); j++ {
|
||||
if j > 0 {
|
||||
valueBuffer.WriteString(" ")
|
||||
}
|
||||
valueBuffer.WriteString(v[j])
|
||||
}
|
||||
logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
|
||||
}
|
||||
conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
|
||||
}
|
||||
|
||||
func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
|
||||
if contentLen == 0 || contentLen > md5Threshold {
|
||||
// Huge body, use temporary file
|
||||
tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
|
||||
if tempFile != nil {
|
||||
io.Copy(tempFile, body)
|
||||
tempFile.Seek(0, os.SEEK_SET)
|
||||
md5 := md5.New()
|
||||
io.Copy(md5, tempFile)
|
||||
sum := md5.Sum(nil)
|
||||
b64 = base64.StdEncoding.EncodeToString(sum[:])
|
||||
tempFile.Seek(0, os.SEEK_SET)
|
||||
reader = tempFile
|
||||
}
|
||||
} else {
|
||||
// Small body, use memory
|
||||
buf, _ := ioutil.ReadAll(body)
|
||||
sum := md5.Sum(buf)
|
||||
b64 = base64.StdEncoding.EncodeToString(sum[:])
|
||||
reader = bytes.NewReader(buf)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func readResponseBody(resp *http.Response) ([]byte, error) {
|
||||
defer resp.Body.Close()
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
|
||||
var storageErr ServiceError
|
||||
|
||||
if err := xml.Unmarshal(body, &storageErr); err != nil {
|
||||
return storageErr, err
|
||||
}
|
||||
|
||||
storageErr.StatusCode = statusCode
|
||||
storageErr.RequestID = requestID
|
||||
storageErr.RawMessage = string(body)
|
||||
return storageErr, nil
|
||||
}
|
||||
|
||||
func xmlUnmarshal(body io.Reader, v interface{}) error {
|
||||
data, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return xml.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func jsonUnmarshal(body io.Reader, v interface{}) error {
|
||||
data, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// timeoutConn handles HTTP timeout: it wraps a net.Conn and re-arms
// read/write deadlines around each I/O operation (see Read/Write).
type timeoutConn struct {
	conn        net.Conn      // underlying connection
	timeout     time.Duration // short deadline applied per Read/Write
	longTimeout time.Duration // idle (between-operations) read deadline
}
|
||||
|
||||
func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
|
||||
conn.SetReadDeadline(time.Now().Add(longTimeout))
|
||||
return &timeoutConn{
|
||||
conn: conn,
|
||||
timeout: timeout,
|
||||
longTimeout: longTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads from the wrapped connection, applying the short
// per-operation deadline for the read itself and restoring the long
// idle deadline afterwards.
func (c *timeoutConn) Read(b []byte) (n int, err error) {
	c.SetReadDeadline(time.Now().Add(c.timeout))
	n, err = c.conn.Read(b)
	c.SetReadDeadline(time.Now().Add(c.longTimeout))
	return n, err
}
|
||||
|
||||
// Write writes to the wrapped connection under the short per-operation
// write deadline.
// NOTE(review): after the write completes it re-arms the READ deadline
// (not the write deadline) with the long idle timeout — this mirrors
// Read's epilogue and looks intentional, but confirm against the
// connection lifecycle before changing.
func (c *timeoutConn) Write(b []byte) (n int, err error) {
	c.SetWriteDeadline(time.Now().Add(c.timeout))
	n, err = c.conn.Write(b)
	c.SetReadDeadline(time.Now().Add(c.longTimeout))
	return n, err
}
|
||||
|
||||
// Close closes the underlying connection.
func (c *timeoutConn) Close() error {
	return c.conn.Close()
}
|
||||
|
||||
// LocalAddr returns the local address of the underlying connection.
func (c *timeoutConn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}
|
||||
|
||||
// RemoteAddr returns the remote address of the underlying connection.
func (c *timeoutConn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}
|
||||
|
||||
// SetDeadline delegates to the underlying connection.
func (c *timeoutConn) SetDeadline(t time.Time) error {
	return c.conn.SetDeadline(t)
}
|
||||
|
||||
// SetReadDeadline delegates to the underlying connection.
func (c *timeoutConn) SetReadDeadline(t time.Time) error {
	return c.conn.SetReadDeadline(t)
}
|
||||
|
||||
// SetWriteDeadline delegates to the underlying connection.
func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
	return c.conn.SetWriteDeadline(t)
}
|
||||
|
||||
// UrlMaker builds URL and resource
const (
	urlTypeCname  = 1 // endpoint is a user-supplied CNAME domain
	urlTypeIP     = 2 // endpoint is a raw IP address
	urlTypeAliyun = 3 // endpoint is a regular OSS domain (bucket becomes a subdomain)
)
|
||||
|
||||
// urlMaker builds request URLs and sign resources from a parsed endpoint.
type urlMaker struct {
	Scheme  string // HTTP or HTTPS
	NetLoc  string // Host or IP
	Type    int    // 1 CNAME, 2 IP, 3 ALIYUN
	IsProxy bool   // Proxy
}
|
||||
|
||||
// Init parses endpoint
|
||||
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error {
|
||||
if strings.HasPrefix(endpoint, "http://") {
|
||||
um.Scheme = "http"
|
||||
um.NetLoc = endpoint[len("http://"):]
|
||||
} else if strings.HasPrefix(endpoint, "https://") {
|
||||
um.Scheme = "https"
|
||||
um.NetLoc = endpoint[len("https://"):]
|
||||
} else {
|
||||
um.Scheme = "http"
|
||||
um.NetLoc = endpoint
|
||||
}
|
||||
|
||||
//use url.Parse() to get real host
|
||||
strUrl := um.Scheme + "://" + um.NetLoc
|
||||
url, err := url.Parse(strUrl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
um.NetLoc = url.Host
|
||||
host, _, err := net.SplitHostPort(um.NetLoc)
|
||||
if err != nil {
|
||||
host = um.NetLoc
|
||||
if host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
}
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
if ip != nil {
|
||||
um.Type = urlTypeIP
|
||||
} else if isCname {
|
||||
um.Type = urlTypeCname
|
||||
} else {
|
||||
um.Type = urlTypeAliyun
|
||||
}
|
||||
um.IsProxy = isProxy
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getURL gets URL
|
||||
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
|
||||
host, path := um.buildURL(bucket, object)
|
||||
addr := ""
|
||||
if params == "" {
|
||||
addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
|
||||
} else {
|
||||
addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
|
||||
}
|
||||
uri, _ := url.ParseRequestURI(addr)
|
||||
return uri
|
||||
}
|
||||
|
||||
// getSignURL gets sign URL
|
||||
func (um urlMaker) getSignURL(bucket, object, params string) string {
|
||||
host, path := um.buildURL(bucket, object)
|
||||
return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
|
||||
}
|
||||
|
||||
// getSignRtmpURL Build Sign Rtmp URL
|
||||
func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
|
||||
host, path := um.buildURL(bucket, "live")
|
||||
|
||||
channelName = url.QueryEscape(channelName)
|
||||
channelName = strings.Replace(channelName, "+", "%20", -1)
|
||||
|
||||
return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params)
|
||||
}
|
||||
|
||||
// buildURL builds URL
|
||||
func (um urlMaker) buildURL(bucket, object string) (string, string) {
|
||||
var host = ""
|
||||
var path = ""
|
||||
|
||||
object = url.QueryEscape(object)
|
||||
object = strings.Replace(object, "+", "%20", -1)
|
||||
|
||||
if um.Type == urlTypeCname {
|
||||
host = um.NetLoc
|
||||
path = "/" + object
|
||||
} else if um.Type == urlTypeIP {
|
||||
if bucket == "" {
|
||||
host = um.NetLoc
|
||||
path = "/"
|
||||
} else {
|
||||
host = um.NetLoc
|
||||
path = fmt.Sprintf("/%s/%s", bucket, object)
|
||||
}
|
||||
} else {
|
||||
if bucket == "" {
|
||||
host = um.NetLoc
|
||||
path = "/"
|
||||
} else {
|
||||
host = bucket + "." + um.NetLoc
|
||||
path = "/" + object
|
||||
}
|
||||
}
|
||||
|
||||
return host, path
|
||||
}
|
||||
|
||||
// buildURL builds URL
|
||||
func (um urlMaker) buildURLV4(bucket, object string) (string, string) {
|
||||
var host = ""
|
||||
var path = ""
|
||||
|
||||
object = url.QueryEscape(object)
|
||||
object = strings.Replace(object, "+", "%20", -1)
|
||||
|
||||
// no escape /
|
||||
object = strings.Replace(object, "%2F", "/", -1)
|
||||
|
||||
if um.Type == urlTypeCname {
|
||||
host = um.NetLoc
|
||||
path = "/" + object
|
||||
} else if um.Type == urlTypeIP {
|
||||
if bucket == "" {
|
||||
host = um.NetLoc
|
||||
path = "/"
|
||||
} else {
|
||||
host = um.NetLoc
|
||||
path = fmt.Sprintf("/%s/%s", bucket, object)
|
||||
}
|
||||
} else {
|
||||
if bucket == "" {
|
||||
host = um.NetLoc
|
||||
path = "/"
|
||||
} else {
|
||||
host = bucket + "." + um.NetLoc
|
||||
path = fmt.Sprintf("/%s/%s", bucket, object)
|
||||
}
|
||||
}
|
||||
return host, path
|
||||
}
|
@ -0,0 +1,264 @@
|
||||
package oss
|
||||
|
||||
import "os"
|
||||
|
||||
// ACLType bucket/object ACL
type ACLType string

const (
	// ACLPrivate definition: private read and write
	ACLPrivate ACLType = "private"

	// ACLPublicRead definition: public read and private write
	ACLPublicRead ACLType = "public-read"

	// ACLPublicReadWrite definition: public read and public write
	ACLPublicReadWrite ACLType = "public-read-write"

	// ACLDefault the object inherits its bucket's ACL; only applicable for objects.
	ACLDefault ACLType = "default"
)
|
||||
|
||||
// VersioningStatus defines the bucket versioning status.
type VersioningStatus string

const (
	// VersionEnabled versioning status: Enabled
	VersionEnabled VersioningStatus = "Enabled"

	// VersionSuspended versioning status: Suspended
	VersionSuspended VersioningStatus = "Suspended"
)
|
||||
|
||||
// MetadataDirectiveType specifies whether to use the metadata of the source object when copying an object.
type MetadataDirectiveType string

const (
	// MetaCopy the target object's metadata is copied from the source one
	MetaCopy MetadataDirectiveType = "COPY"

	// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
	MetaReplace MetadataDirectiveType = "REPLACE"
)
|
||||
|
||||
// TaggingDirectiveType specifies whether to use the tagging of the source object when copying an object.
type TaggingDirectiveType string

const (
	// TaggingCopy the target object's tagging is copied from the source one
	TaggingCopy TaggingDirectiveType = "COPY"

	// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
	TaggingReplace TaggingDirectiveType = "REPLACE"
)
|
||||
|
||||
// AlgorithmType specifies the server-side encryption algorithm name.
type AlgorithmType string

const (
	// KMSAlgorithm key-management-service managed encryption
	KMSAlgorithm AlgorithmType = "KMS"
	// AESAlgorithm AES-256 encryption
	AESAlgorithm AlgorithmType = "AES256"
	// SM4Algorithm SM4 encryption
	SM4Algorithm AlgorithmType = "SM4"
)
|
||||
|
||||
// StorageClassType bucket storage type
type StorageClassType string

const (
	// StorageStandard standard
	StorageStandard StorageClassType = "Standard"

	// StorageIA infrequent access
	StorageIA StorageClassType = "IA"

	// StorageArchive archive
	StorageArchive StorageClassType = "Archive"

	// StorageColdArchive cold archive
	StorageColdArchive StorageClassType = "ColdArchive"
)
|
||||
|
||||
// DataRedundancyType bucket data redundancy type.
type DataRedundancyType string

const (
	// RedundancyLRS local redundancy, default value
	RedundancyLRS DataRedundancyType = "LRS"

	// RedundancyZRS same-city (zone) redundancy
	RedundancyZRS DataRedundancyType = "ZRS"
)
|
||||
|
||||
// ObjecthashFuncType names the hash function used for object hashing.
type ObjecthashFuncType string

const (
	// HashFuncSha1 SHA-1 object hash
	HashFuncSha1 ObjecthashFuncType = "SHA-1"
	// HashFuncSha256 SHA-256 object hash
	HashFuncSha256 ObjecthashFuncType = "SHA-256"
)
|
||||
|
||||
// PayerType the type of request payer
type PayerType string

const (
	// Requester the party sending the request is the payer
	Requester PayerType = "Requester"

	// BucketOwner the bucket owner is the payer (original comment was a
	// copy-paste of Requester's)
	BucketOwner PayerType = "BucketOwner"
)
|
||||
|
||||
// RestoreMode the restore mode for a ColdArchive object.
type RestoreMode string

const (
	// RestoreExpedited object will be restored in 1 hour
	RestoreExpedited RestoreMode = "Expedited"

	// RestoreStandard object will be restored in 2-5 hours
	RestoreStandard RestoreMode = "Standard"

	// RestoreBulk object will be restored in 5-10 hours
	RestoreBulk RestoreMode = "Bulk"
)
|
||||
|
||||
// HTTPMethod HTTP request method
type HTTPMethod string

const (
	// HTTPGet HTTP GET
	HTTPGet HTTPMethod = "GET"

	// HTTPPut HTTP PUT
	HTTPPut HTTPMethod = "PUT"

	// HTTPHead HTTP HEAD
	HTTPHead HTTPMethod = "HEAD"

	// HTTPPost HTTP POST
	HTTPPost HTTPMethod = "POST"

	// HTTPDelete HTTP DELETE
	HTTPDelete HTTPMethod = "DELETE"
)
|
||||
|
||||
// HTTP headers
const (
	// Standard HTTP headers.
	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
	HTTPHeaderAuthorization             = "Authorization"
	HTTPHeaderCacheControl              = "Cache-Control"
	HTTPHeaderContentDisposition        = "Content-Disposition"
	HTTPHeaderContentEncoding           = "Content-Encoding"
	HTTPHeaderContentLength             = "Content-Length"
	HTTPHeaderContentMD5                = "Content-MD5"
	HTTPHeaderContentType               = "Content-Type"
	HTTPHeaderContentLanguage           = "Content-Language"
	HTTPHeaderDate                      = "Date"
	HTTPHeaderEtag                      = "ETag"
	HTTPHeaderExpires                   = "Expires"
	HTTPHeaderHost                      = "Host"
	HTTPHeaderLastModified              = "Last-Modified"
	HTTPHeaderRange                     = "Range"
	HTTPHeaderLocation                  = "Location"
	HTTPHeaderOrigin                    = "Origin"
	HTTPHeaderServer                    = "Server"
	HTTPHeaderUserAgent                 = "User-Agent"
	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
	HTTPHeaderIfMatch                   = "If-Match"
	HTTPHeaderIfNoneMatch               = "If-None-Match"
	HTTPHeaderACReqMethod               = "Access-Control-Request-Method"
	HTTPHeaderACReqHeaders              = "Access-Control-Request-Headers"

	// OSS-specific (X-Oss-*) headers.
	HTTPHeaderOssACL                         = "X-Oss-Acl"
	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
	HTTPHeaderOssServerSideEncryptionKeyID   = "X-Oss-Server-Side-Encryption-Key-Id"
	HTTPHeaderOssServerSideDataEncryption    = "X-Oss-Server-Side-Data-Encryption"
	HTTPHeaderSSECAlgorithm                  = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
	HTTPHeaderSSECKey                        = "X-Oss-Server-Side-Encryption-Customer-Key"
	HTTPHeaderSSECKeyMd5                     = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
	HTTPHeaderOssStorageClass                = "X-Oss-Storage-Class"
	HTTPHeaderOssCallback                    = "X-Oss-Callback"
	HTTPHeaderOssCallbackVar                 = "X-Oss-Callback-Var"
	HTTPHeaderOssRequester                   = "X-Oss-Request-Payer"
	HTTPHeaderOssTagging                     = "X-Oss-Tagging"
	HTTPHeaderOssTaggingDirective            = "X-Oss-Tagging-Directive"
	HTTPHeaderOssTrafficLimit                = "X-Oss-Traffic-Limit"
	HTTPHeaderOssForbidOverWrite             = "X-Oss-Forbid-Overwrite"
	HTTPHeaderOssRangeBehavior               = "X-Oss-Range-Behavior"
	HTTPHeaderOssTaskID                      = "X-Oss-Task-Id"
	HTTPHeaderOssHashCtx                     = "X-Oss-Hash-Ctx"
	HTTPHeaderOssMd5Ctx                      = "X-Oss-Md5-Ctx"
	HTTPHeaderAllowSameActionOverLap         = "X-Oss-Allow-Same-Action-Overlap"
	HttpHeaderOssDate                        = "X-Oss-Date"
	HttpHeaderOssContentSha256               = "X-Oss-Content-Sha256"
)
|
||||
|
||||
// HTTP Param
const (
	// v1 signed-URL query parameters.
	HTTPParamExpires       = "Expires"
	HTTPParamAccessKeyID   = "OSSAccessKeyId"
	HTTPParamSignature     = "Signature"
	HTTPParamSecurityToken = "security-token"
	HTTPParamPlaylistName  = "playlistName"

	// v2 signed-URL query parameters.
	HTTPParamSignatureVersion    = "x-oss-signature-version"
	HTTPParamExpiresV2           = "x-oss-expires"
	HTTPParamAccessKeyIDV2       = "x-oss-access-key-id"
	HTTPParamSignatureV2         = "x-oss-signature"
	HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
)
|
||||
|
||||
// Other constants
const (
	MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
	MinPartSize = 100 * 1024             // Min part size, 100KB

	FilePermMode = os.FileMode(0664) // Default file permission

	TempFilePrefix = "oss-go-temp-" // Temp file prefix
	TempFileSuffix = ".temp"        // Temp file suffix

	CheckpointFileSuffix = ".cp" // Checkpoint file suffix

	// NullVersion the literal "null" version id — presumably the id of
	// objects written while versioning is off; confirm against the API.
	NullVersion = "null"

	DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature

	Version = "v2.2.4" // Go SDK version
)
|
||||
|
||||
// FrameType constants — frame type ids of a framed response
// (presumably the select-object protocol; confirm against the decoder).
const (
	DataFrameType        = 8388609
	ContinuousFrameType  = 8388612
	EndFrameType         = 8388613
	MetaEndFrameCSVType  = 8388614
	MetaEndFrameJSONType = 8388615
)
|
||||
|
||||
// AuthVersionType the version of the authentication (signature) scheme.
type AuthVersionType string

const (
	// AuthV1 v1 signature
	AuthV1 AuthVersionType = "v1"
	// AuthV2 v2 signature
	AuthV2 AuthVersionType = "v2"
	// AuthV4 v4 signature
	AuthV4 AuthVersionType = "v4"
)
|
@ -0,0 +1,123 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
)
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint64       // running CRC64 value
	tab *crc64.Table // polynomial table used by Update
}
|
||||
|
||||
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table. init seeds the
// running CRC (used to resume a partial checksum).
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
|
||||
|
||||
// Size returns the number of bytes Sum will append (crc64.Size = 8).
func (d *digest) Size() int { return crc64.Size }
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
|
||||
|
||||
// Reset resets the hash to its initial state.
// NOTE(review): this sets the CRC to 0, not to the `init` value passed
// to NewCRC — a digest seeded with a non-zero init does not return to
// that seed on Reset. Confirm callers never Reset a resumed digest.
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error and always reports len(p) bytes consumed.
func (d *digest) Write(p []byte) (n int, err error) {
	d.crc = crc64.Update(d.crc, d.tab, p)
	return len(p), nil
}
|
||||
|
||||
// Sum64 returns the current CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
// Sum returns hash value.
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// gf2Dim is the dimension of GF(2) vectors (length of the CRC in bits).
const gf2Dim int = 64
|
||||
|
||||
// gf2MatrixTimes multiplies the GF(2) matrix mat by the vector vec:
// the product is the XOR of the rows of mat selected by the set bits
// of vec (addition over GF(2) is XOR).
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
	var product uint64
	for row := 0; vec != 0; row++ {
		if vec&1 == 1 {
			product ^= mat[row]
		}
		vec >>= 1
	}
	return product
}
|
||||
|
||||
func gf2MatrixSquare(square []uint64, mat []uint64) {
|
||||
for n := 0; n < gf2Dim; n++ {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// CRC64Combine combines CRC64 values: given crc1 over a first block and
// crc2 over a second block of len2 bytes, it returns the CRC64 of the
// concatenation. It works by applying "append len2 zero bytes" to crc1
// as a GF(2) matrix operator (square-and-multiply over the bits of
// len2), then XOR-ing in crc2 — the same scheme as zlib's crc32_combine.
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
	var even [gf2Dim]uint64 // Even-power-of-two zeros operator
	var odd [gf2Dim]uint64  // Odd-power-of-two zeros operator

	// Degenerate case: appending zero bytes changes nothing.
	if len2 == 0 {
		return crc1
	}

	// Put operator for one zero bit in odd
	odd[0] = crc64.ECMA // CRC64 polynomial
	var row uint64 = 1
	for n := 1; n < gf2Dim; n++ {
		odd[n] = row
		row <<= 1
	}

	// Put operator for two zero bits in even
	gf2MatrixSquare(even[:], odd[:])

	// Put operator for four zero bits in odd
	gf2MatrixSquare(odd[:], even[:])

	// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
	for {
		// Apply zeros operator for this bit of len2
		gf2MatrixSquare(even[:], odd[:])

		if len2&1 != 0 {
			crc1 = gf2MatrixTimes(even[:], crc1)
		}

		len2 >>= 1

		// If no more bits set, then done
		if len2 == 0 {
			break
		}

		// Another iteration of the loop with odd and even swapped
		gf2MatrixSquare(odd[:], even[:])
		if len2&1 != 0 {
			crc1 = gf2MatrixTimes(odd[:], crc1)
		}
		len2 >>= 1

		// If no more bits set, then done
		if len2 == 0 {
			break
		}
	}

	// Return combined CRC
	crc1 ^= crc2
	return crc1
}
|
@ -0,0 +1,567 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DownloadFile downloads files with multipart download.
|
||||
//
|
||||
// objectKey the object key.
|
||||
// filePath the local file to download from objectKey in OSS.
|
||||
// partSize the part size in bytes.
|
||||
// options object's constraints, check out GetObject for the reference.
|
||||
//
|
||||
// error it's nil when the call succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
|
||||
if partSize < 1 {
|
||||
return errors.New("oss: part size smaller than 1")
|
||||
}
|
||||
|
||||
uRange, err := GetRangeConfig(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
|
||||
if cpFilePath != "" {
|
||||
return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
|
||||
}
|
||||
|
||||
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
absPath, _ := filepath.Abs(destFile)
|
||||
cpFileName := getCpFileName(src, absPath, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// downloadWorkerArg is download worker's parameters, shared by every
// worker goroutine of one download.
type downloadWorkerArg struct {
	bucket    *Bucket          // bucket the parts are fetched from
	key       string           // object key being downloaded
	filePath  string           // local (temp) file parts are written into
	options   []Option         // caller-supplied GetObject options
	hook      downloadPartHook // test hook invoked before each part
	enableCRC bool             // whether to compute a per-part CRC64
}
|
||||
|
||||
// downloadPartHook is hook for test; it runs before each part download
// and can inject an error to simulate a failure.
type downloadPartHook func(part downloadPart) error

// downloadPartHooker is the active hook; tests may swap it out.
var downloadPartHooker downloadPartHook = defaultDownloadPartHook

// defaultDownloadPartHook is the production no-op hook.
func defaultDownloadPartHook(part downloadPart) error {
	return nil
}
|
||||
|
||||
// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
// Per-part GetObject calls get this no-op listener so that only the
// aggregate download publishes progress to the caller's listener.
type defaultDownloadProgressListener struct {
}

// ProgressChanged no-ops: per-part progress is intentionally discarded.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
|
||||
|
||||
// downloadWorker
|
||||
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
|
||||
for part := range jobs {
|
||||
if err := arg.hook(part); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
// Resolve options
|
||||
r := Range(part.Start, part.End)
|
||||
p := Progress(&defaultDownloadProgressListener{})
|
||||
|
||||
var respHeader http.Header
|
||||
opts := make([]Option, len(arg.options)+3)
|
||||
// Append orderly, can not be reversed!
|
||||
opts = append(opts, arg.options...)
|
||||
opts = append(opts, r, p, GetResponseHeader(&respHeader))
|
||||
|
||||
rd, err := arg.bucket.GetObject(arg.key, opts...)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
var crcCalc hash.Hash64
|
||||
if arg.enableCRC {
|
||||
crcCalc = crc64.New(CrcTable())
|
||||
contentLen := part.End - part.Start + 1
|
||||
rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
|
||||
if err != nil {
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
_, err = io.Copy(fd, rd)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
|
||||
fd.Close()
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
|
||||
if arg.enableCRC {
|
||||
part.CRC64 = crcCalc.Sum64()
|
||||
}
|
||||
|
||||
fd.Close()
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// downloadScheduler
|
||||
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// downloadPart defines download part: one contiguous byte range of the
// object, written at (Start - Offset) inside the local file.
type downloadPart struct {
	Index  int    // Part number, starting from 0
	Start  int64  // Start index (inclusive, object byte offset)
	End    int64  // End index (inclusive, object byte offset)
	Offset int64  // Offset (start of the whole requested range)
	CRC64  uint64 // CRC check value of part
}
|
||||
|
||||
// getDownloadParts gets download parts
|
||||
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
|
||||
parts := []downloadPart{}
|
||||
part := downloadPart{}
|
||||
i := 0
|
||||
start, end := AdjustRange(uRange, objectSize)
|
||||
for offset := start; offset < end; offset += partSize {
|
||||
part.Index = i
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, end, partSize)
|
||||
part.Offset = start
|
||||
part.CRC64 = 0
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getObjectBytes gets object bytes length
|
||||
func getObjectBytes(parts []downloadPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// combineCRCInParts caculates the total CRC of continuous parts
|
||||
func combineCRCInParts(dps []downloadPart) uint64 {
|
||||
if dps == nil || len(dps) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
crc := dps[0].CRC64
|
||||
for i := 1; i < len(dps); i++ {
|
||||
crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
|
||||
}
|
||||
|
||||
return crc
|
||||
}
|
||||
|
||||
// downloadFile downloads file concurrently without checkpoint.
//
// It writes into filePath+TempFileSuffix, fans the object's parts out to
// `routines` worker goroutines, aggregates their results while publishing
// progress events, optionally verifies the combined whole-object CRC64,
// and finally renames the temp file onto filePath.
//
// objectKey  the object to download.
// filePath   the local destination file.
// partSize   the part size in bytes.
// options    GetObject-compatible options applied to each part request.
// routines   the number of concurrent download workers.
// uRange     the optional byte range to download (nil for whole object).
//
// error      nil on success, otherwise the first failure encountered.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// If the file does not exist, create one. If exists, the download will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Get the object detailed meta for object whole size
	// must delete header:range to get whole object size
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}

	// CRC verification only applies to whole-object downloads of objects
	// that carry a server-side CRC64 header.
	enableCRC := false
	expectedCRC := (uint64)(0)
	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
			enableCRC = true
			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
		}
	}

	// Get the parts of the file
	parts := getDownloadParts(objectSize, partSize, uRange)
	// jobs/results are buffered to the part count so neither the scheduler
	// nor the workers block; failed/die coordinate early abort.
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the download workers
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Download parts concurrently
	go downloadScheduler(jobs, parts)

	// Waiting for parts download finished
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			downBytes := (part.End - part.Start + 1)
			completedBytes += downBytes
			// Record the part's CRC for the final whole-object combination.
			parts[part.Index].CRC64 = part.CRC64
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			// First failure wins: signal all workers to stop, then bail out.
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	if enableCRC {
		actualCRC := combineCRCInParts(parts)
		err = CheckDownloadCRC(actualCRC, expectedCRC)
		if err != nil {
			return err
		}
	}

	return os.Rename(tempFilePath, filePath)
}
|
||||
|
||||
// ----- Concurrent download with checkpoint -----

// downloadCpMagic tags checkpoint files written by this SDK so stale or
// foreign files are rejected by downloadCheckpoint.isValid.
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
|
||||
|
||||
// downloadCheckpoint is the resumable-download state persisted (as JSON)
// to the checkpoint file after every completed part.
type downloadCheckpoint struct {
	Magic    string         // Magic
	MD5      string         // Checkpoint content MD5 (seal over the JSON with MD5 field blanked)
	FilePath string         // Local file
	Object   string         // Key
	ObjStat  objectStat     // Object status
	Parts    []downloadPart // All download parts
	PartStat []bool         // Parts' download status
	Start    int64          // Start point of the file
	End      int64          // End point of the file
	enableCRC bool          // Whether has CRC check; unexported, so NOT serialized to the checkpoint JSON
	CRC      uint64         // CRC check value
}
|
||||
|
||||
// objectStat is the snapshot of the remote object's identity taken when
// the checkpoint was created; used to detect server-side changes.
type objectStat struct {
	Size         int64  // Object size
	LastModified string // Last modified time
	Etag         string // Etag
}
|
||||
|
||||
// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated.
|
||||
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
|
||||
// Compare the CP's Magic and the MD5
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size, last modified time and etag
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check the download range
|
||||
if uRange != nil {
|
||||
start, end := AdjustRange(uRange, objectSize)
|
||||
if start != cp.Start || end != cp.End {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load checkpoint from local file
|
||||
func (cp *downloadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump funciton dumps to file
|
||||
func (cp *downloadCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialize
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts gets unfinished parts
|
||||
func (cp downloadCheckpoint) todoParts() []downloadPart {
|
||||
dps := []downloadPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes gets completed size
|
||||
func (cp downloadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initiates download tasks
|
||||
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
|
||||
// CP
|
||||
cp.Magic = downloadCpMagic
|
||||
cp.FilePath = filePath
|
||||
cp.Object = objectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
|
||||
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
|
||||
cp.enableCRC = true
|
||||
cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
|
||||
}
|
||||
}
|
||||
|
||||
// Parts
|
||||
cp.Parts = getDownloadParts(objectSize, partSize, uRange)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
|
||||
err := os.Rename(downFilepath, cp.FilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// downloadFileWithCp downloads files with checkpoint.
//
// It resumes from cpFilePath when the stored checkpoint still matches
// the remote object and requested range; otherwise it reinitializes the
// checkpoint. Each finished part is persisted back to the checkpoint
// file so an interrupted download can resume; on success the temp file
// is renamed onto filePath and the checkpoint file removed.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// Load checkpoint data. A load failure is deliberately non-fatal:
	// the stale file is removed and the download restarts from scratch.
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// Get the object detailed meta for object whole size
	// must delete header:range to get whole object size
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	// Load error or data invalid. Re-initialize the download.
	valid, err := dcp.isValid(meta, uRange)
	if err != nil || !valid {
		if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Create the file if not exists. Otherwise the parts download will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Unfinished parts
	parts := dcp.todoParts()
	// Buffered to the part count so neither scheduler nor workers block;
	// failed/die coordinate early abort.
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := dcp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Start the download workers routine
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Concurrently downloads parts
	go downloadScheduler(jobs, parts)

	// Wait for the parts download finished
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			// Persist progress after every part so a crash can resume here.
			dcp.PartStat[part.Index] = true
			dcp.Parts[part.Index].CRC64 = part.CRC64
			dcp.dump(cpFilePath)
			downBytes := (part.End - part.Start + 1)
			completedBytes += downBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			// First failure wins: stop all workers and bail out.
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	if dcp.enableCRC {
		actualCRC := combineCRCInParts(dcp.Parts)
		err = CheckDownloadCRC(actualCRC, dcp.CRC)
		if err != nil {
			return err
		}
	}

	return dcp.complete(cpFilePath, tempFilePath)
}
|
@ -0,0 +1,94 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ServiceError contains fields of the error response from Oss Service REST API.
// The XML-tagged fields are parsed from the service's error body; the
// untagged fields are filled in by the SDK.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
	Message    string   `xml:"Message"`   // The detail error message from OSS
	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
	Endpoint   string   `xml:"Endpoint"`
	RawMessage string   // The raw messages from OSS
	StatusCode int      // HTTP status code
}
|
||||
|
||||
// Error implements interface error
|
||||
func (e ServiceError) Error() string {
|
||||
if e.Endpoint == "" {
|
||||
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID)
|
||||
}
|
||||
return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
|
||||
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // The expected HTTP stats code returned from OSS
	got     int   // The actual HTTP status code from OSS
}
|
||||
|
||||
// Error implements interface error
|
||||
func (e UnexpectedStatusCodeError) Error() string {
|
||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
||||
|
||||
got := s(e.got)
|
||||
expected := []string{}
|
||||
for _, v := range e.allowed {
|
||||
expected = append(expected, s(v))
|
||||
}
|
||||
return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
|
||||
got, strings.Join(expected, " or "))
|
||||
}
|
||||
|
||||
// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}
|
||||
|
||||
// CheckRespCode returns UnexpectedStatusError if the given response code is not
|
||||
// one of the allowed status codes; otherwise nil.
|
||||
func CheckRespCode(respCode int, allowed []int) error {
|
||||
for _, v := range allowed {
|
||||
if respCode == v {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return UnexpectedStatusCodeError{allowed, respCode}
|
||||
}
|
||||
|
||||
// CRCCheckError is returned when crc check is inconsistent between client and server
type CRCCheckError struct {
	clientCRC uint64 // Calculated CRC64 in client
	serverCRC uint64 // Calculated CRC64 in server
	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
	requestID string // The request id of this operation
}
|
||||
|
||||
// Error implements interface error.
// Both CRCs and the request id are reported to aid debugging with OSS support.
func (e CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		e.operation, e.clientCRC, e.serverCRC, e.requestID)
}
|
||||
|
||||
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
|
||||
if clientCRC == serverCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
|
||||
}
|
||||
|
||||
func CheckCRC(resp *Response, operation string) error {
|
||||
if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
|
||||
return nil
|
||||
}
|
||||
return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
// +build !go1.7
|
||||
|
||||
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
|
||||
// this file is only for build,not supports limit upload speed
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	// perTokenBandwidthSize is the number of bytes one rate token covers (1 KB).
	perTokenBandwidthSize int = 1024
)
|
||||
|
||||
// OssLimiter is an empty placeholder on pre-go1.7 builds, where
// golang.org/x/time/rate (which needs context) is unavailable.
type OssLimiter struct {
}
|
||||
|
||||
// LimitSpeedReader is a build-only placeholder on pre-go1.7; speed
// limiting is not supported there (see GetOssLimiter).
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader   // wrapped source reader
	ossLimiter *OssLimiter // always nil on this build
}
|
||||
|
||||
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
|
||||
err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
|
||||
return nil, err
|
||||
}
|
@ -0,0 +1,90 @@
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
const (
	// perTokenBandwidthSize is the number of bytes one rate token covers (1 KB).
	perTokenBandwidthSize int = 1024
)
|
||||
|
||||
// OssLimiter wraps rate.Limiter; one token corresponds to
// perTokenBandwidthSize bytes of transfer.
type OssLimiter struct {
	limiter *rate.Limiter
}
|
||||
|
||||
// GetOssLimiter create OssLimiter
|
||||
// uploadSpeed KB/s
|
||||
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
|
||||
limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
|
||||
|
||||
// first consume the initial full token,the limiter will behave more accurately
|
||||
limiter.AllowN(time.Now(), uploadSpeed)
|
||||
|
||||
return &OssLimiter{
|
||||
limiter: limiter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LimitSpeedReader for limit bandwidth upload: wraps a reader so that
// Read paces itself against ossLimiter.
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader   // underlying data source
	ossLimiter *OssLimiter // token bucket; 1 token == perTokenBandwidthSize bytes
}
|
||||
|
||||
// Read fills p from the wrapped reader in bandwidth-limited slices.
// Each iteration reads at most burst*perTokenBandwidthSize bytes, then
// reserves one token per perTokenBandwidthSize bytes actually read and
// sleeps for the delay the limiter demands, throttling throughput.
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
	n = 0
	err = nil
	start := 0
	burst := r.ossLimiter.limiter.Burst()
	var end int
	var tmpN int
	var tc int
	for start < len(p) {
		// Clamp this slice to the limiter's burst capacity.
		if start+burst*perTokenBandwidthSize < len(p) {
			end = start + burst*perTokenBandwidthSize
		} else {
			end = len(p)
		}

		tmpN, err = r.reader.Read(p[start:end])
		if tmpN > 0 {
			n += tmpN
			start = n
		}

		// Propagate EOF or read errors with whatever was already read.
		if err != nil {
			return
		}

		// One token per perTokenBandwidthSize bytes, rounded up.
		tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
		now := time.Now()
		re := r.ossLimiter.limiter.ReserveN(now, tc)
		if !re.OK() {
			err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
				start, end, burst, perTokenBandwidthSize)
			return
		}
		// Sleep for as long as the limiter requires before the next slice.
		timeDelay := re.Delay()
		time.Sleep(timeDelay)
	}
	return
}
|
||||
|
||||
// Close ...
|
||||
func (r *LimitSpeedReader) Close() error {
|
||||
rc, ok := r.reader.(io.ReadCloser)
|
||||
if ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,257 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
//
|
||||
// CreateLiveChannel create a live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// config configuration of the channel
|
||||
//
|
||||
// CreateLiveChannelResult the result of create live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
|
||||
var out CreateLiveChannelResult
|
||||
|
||||
bs, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// status enabled/disabled
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["status"] = status
|
||||
|
||||
resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
key := fmt.Sprintf("%s/%s", channelName, playlistName)
|
||||
resp, err := bucket.do("POST", key, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// startTime the start time of the playlist
|
||||
// endTime the endtime of the playlist
|
||||
//
|
||||
// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
|
||||
params := map[string]interface{}{}
|
||||
params["vod"] = nil
|
||||
params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
|
||||
params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelStat Get the state of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelStat the state of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
|
||||
var out LiveChannelStat
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "stat"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelInfo Get the configuration info of the live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelConfiguration the configuration info of the live-channel
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
|
||||
var out LiveChannelConfiguration
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// GetLiveChannelHistory Get push records of live-channel
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// LiveChannelHistory push records
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
|
||||
var out LiveChannelHistory
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
params["comp"] = "history"
|
||||
|
||||
resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// ListLiveChannel list the live-channels
|
||||
//
|
||||
// options Prefix: filter by the name start with the value of "Prefix"
|
||||
// MaxKeys: the maximum count returned
|
||||
// Marker: cursor from which starting list
|
||||
//
|
||||
// ListLiveChannelResult live-channel list
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
|
||||
var out ListLiveChannelResult
|
||||
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["live"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", "", params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
//
|
||||
// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
//
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
|
||||
params := map[string]interface{}{}
|
||||
params["live"] = nil
|
||||
|
||||
if channelName == "" {
|
||||
return fmt.Errorf("invalid argument: channel name is empty")
|
||||
}
|
||||
|
||||
resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
//
|
||||
// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel.
|
||||
//
|
||||
// channelName the name of the channel
|
||||
// playlistName the name of the playlist, must end with ".m3u8"
|
||||
// expires expiration (in seconds)
|
||||
//
|
||||
// string singed rtmp push stream url
|
||||
// error nil if success, otherwise error
|
||||
//
|
||||
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
|
||||
if expires <= 0 {
|
||||
return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires)
|
||||
}
|
||||
expiration := time.Now().Unix() + expires
|
||||
|
||||
return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
|
||||
}
|
@ -0,0 +1,572 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var extToMimeType = map[string]string{
|
||||
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
|
||||
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
|
||||
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
|
||||
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
|
||||
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
|
||||
".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
|
||||
".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
|
||||
".apk": "application/vnd.android.package-archive",
|
||||
".hqx": "application/mac-binhex40",
|
||||
".cpt": "application/mac-compactpro",
|
||||
".doc": "application/msword",
|
||||
".ogg": "application/ogg",
|
||||
".pdf": "application/pdf",
|
||||
".rtf": "text/rtf",
|
||||
".mif": "application/vnd.mif",
|
||||
".xls": "application/vnd.ms-excel",
|
||||
".ppt": "application/vnd.ms-powerpoint",
|
||||
".odc": "application/vnd.oasis.opendocument.chart",
|
||||
".odb": "application/vnd.oasis.opendocument.database",
|
||||
".odf": "application/vnd.oasis.opendocument.formula",
|
||||
".odg": "application/vnd.oasis.opendocument.graphics",
|
||||
".otg": "application/vnd.oasis.opendocument.graphics-template",
|
||||
".odi": "application/vnd.oasis.opendocument.image",
|
||||
".odp": "application/vnd.oasis.opendocument.presentation",
|
||||
".otp": "application/vnd.oasis.opendocument.presentation-template",
|
||||
".ods": "application/vnd.oasis.opendocument.spreadsheet",
|
||||
".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
|
||||
".odt": "application/vnd.oasis.opendocument.text",
|
||||
".odm": "application/vnd.oasis.opendocument.text-master",
|
||||
".ott": "application/vnd.oasis.opendocument.text-template",
|
||||
".oth": "application/vnd.oasis.opendocument.text-web",
|
||||
".sxw": "application/vnd.sun.xml.writer",
|
||||
".stw": "application/vnd.sun.xml.writer.template",
|
||||
".sxc": "application/vnd.sun.xml.calc",
|
||||
".stc": "application/vnd.sun.xml.calc.template",
|
||||
".sxd": "application/vnd.sun.xml.draw",
|
||||
".std": "application/vnd.sun.xml.draw.template",
|
||||
".sxi": "application/vnd.sun.xml.impress",
|
||||
".sti": "application/vnd.sun.xml.impress.template",
|
||||
".sxg": "application/vnd.sun.xml.writer.global",
|
||||
".sxm": "application/vnd.sun.xml.math",
|
||||
".sis": "application/vnd.symbian.install",
|
||||
".wbxml": "application/vnd.wap.wbxml",
|
||||
".wmlc": "application/vnd.wap.wmlc",
|
||||
".wmlsc": "application/vnd.wap.wmlscriptc",
|
||||
".bcpio": "application/x-bcpio",
|
||||
".torrent": "application/x-bittorrent",
|
||||
".bz2": "application/x-bzip2",
|
||||
".vcd": "application/x-cdlink",
|
||||
".pgn": "application/x-chess-pgn",
|
||||
".cpio": "application/x-cpio",
|
||||
".csh": "application/x-csh",
|
||||
".dvi": "application/x-dvi",
|
||||
".spl": "application/x-futuresplash",
|
||||
".gtar": "application/x-gtar",
|
||||
".hdf": "application/x-hdf",
|
||||
".jar": "application/x-java-archive",
|
||||
".jnlp": "application/x-java-jnlp-file",
|
||||
".js": "application/x-javascript",
|
||||
".ksp": "application/x-kspread",
|
||||
".chrt": "application/x-kchart",
|
||||
".kil": "application/x-killustrator",
|
||||
".latex": "application/x-latex",
|
||||
".rpm": "application/x-rpm",
|
||||
".sh": "application/x-sh",
|
||||
".shar": "application/x-shar",
|
||||
".swf": "application/x-shockwave-flash",
|
||||
".sit": "application/x-stuffit",
|
||||
".sv4cpio": "application/x-sv4cpio",
|
||||
".sv4crc": "application/x-sv4crc",
|
||||
".tar": "application/x-tar",
|
||||
".tcl": "application/x-tcl",
|
||||
".tex": "application/x-tex",
|
||||
".man": "application/x-troff-man",
|
||||
".me": "application/x-troff-me",
|
||||
".ms": "application/x-troff-ms",
|
||||
".ustar": "application/x-ustar",
|
||||
".src": "application/x-wais-source",
|
||||
".zip": "application/zip",
|
||||
".m3u": "audio/x-mpegurl",
|
||||
".ra": "audio/x-pn-realaudio",
|
||||
".wav": "audio/x-wav",
|
||||
".wma": "audio/x-ms-wma",
|
||||
".wax": "audio/x-ms-wax",
|
||||
".pdb": "chemical/x-pdb",
|
||||
".xyz": "chemical/x-xyz",
|
||||
".bmp": "image/bmp",
|
||||
".gif": "image/gif",
|
||||
".ief": "image/ief",
|
||||
".png": "image/png",
|
||||
".wbmp": "image/vnd.wap.wbmp",
|
||||
".ras": "image/x-cmu-raster",
|
||||
".pnm": "image/x-portable-anymap",
|
||||
".pbm": "image/x-portable-bitmap",
|
||||
".pgm": "image/x-portable-graymap",
|
||||
".ppm": "image/x-portable-pixmap",
|
||||
".rgb": "image/x-rgb",
|
||||
".xbm": "image/x-xbitmap",
|
||||
".xpm": "image/x-xpixmap",
|
||||
".xwd": "image/x-xwindowdump",
|
||||
".css": "text/css",
|
||||
".rtx": "text/richtext",
|
||||
".tsv": "text/tab-separated-values",
|
||||
".jad": "text/vnd.sun.j2me.app-descriptor",
|
||||
".wml": "text/vnd.wap.wml",
|
||||
".wmls": "text/vnd.wap.wmlscript",
|
||||
".etx": "text/x-setext",
|
||||
".mxu": "video/vnd.mpegurl",
|
||||
".flv": "video/x-flv",
|
||||
".wm": "video/x-ms-wm",
|
||||
".wmv": "video/x-ms-wmv",
|
||||
".wmx": "video/x-ms-wmx",
|
||||
".wvx": "video/x-ms-wvx",
|
||||
".avi": "video/x-msvideo",
|
||||
".movie": "video/x-sgi-movie",
|
||||
".ice": "x-conference/x-cooltalk",
|
||||
".3gp": "video/3gpp",
|
||||
".ai": "application/postscript",
|
||||
".aif": "audio/x-aiff",
|
||||
".aifc": "audio/x-aiff",
|
||||
".aiff": "audio/x-aiff",
|
||||
".asc": "text/plain",
|
||||
".atom": "application/atom+xml",
|
||||
".au": "audio/basic",
|
||||
".bin": "application/octet-stream",
|
||||
".cdf": "application/x-netcdf",
|
||||
".cgm": "image/cgm",
|
||||
".class": "application/octet-stream",
|
||||
".dcr": "application/x-director",
|
||||
".dif": "video/x-dv",
|
||||
".dir": "application/x-director",
|
||||
".djv": "image/vnd.djvu",
|
||||
".djvu": "image/vnd.djvu",
|
||||
".dll": "application/octet-stream",
|
||||
".dmg": "application/octet-stream",
|
||||
".dms": "application/octet-stream",
|
||||
".dtd": "application/xml-dtd",
|
||||
".dv": "video/x-dv",
|
||||
".dxr": "application/x-director",
|
||||
".eps": "application/postscript",
|
||||
".exe": "application/octet-stream",
|
||||
".ez": "application/andrew-inset",
|
||||
".gram": "application/srgs",
|
||||
".grxml": "application/srgs+xml",
|
||||
".gz": "application/x-gzip",
|
||||
".htm": "text/html",
|
||||
".html": "text/html",
|
||||
".ico": "image/x-icon",
|
||||
".ics": "text/calendar",
|
||||
".ifb": "text/calendar",
|
||||
".iges": "model/iges",
|
||||
".igs": "model/iges",
|
||||
".jp2": "image/jp2",
|
||||
".jpe": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".jpg": "image/jpeg",
|
||||
".kar": "audio/midi",
|
||||
".lha": "application/octet-stream",
|
||||
".lzh": "application/octet-stream",
|
||||
".m4a": "audio/mp4a-latm",
|
||||
".m4p": "audio/mp4a-latm",
|
||||
".m4u": "video/vnd.mpegurl",
|
||||
".m4v": "video/x-m4v",
|
||||
".mac": "image/x-macpaint",
|
||||
".mathml": "application/mathml+xml",
|
||||
".mesh": "model/mesh",
|
||||
".mid": "audio/midi",
|
||||
".midi": "audio/midi",
|
||||
".mov": "video/quicktime",
|
||||
".mp2": "audio/mpeg",
|
||||
".mp3": "audio/mpeg",
|
||||
".mp4": "video/mp4",
|
||||
".mpe": "video/mpeg",
|
||||
".mpeg": "video/mpeg",
|
||||
".mpg": "video/mpeg",
|
||||
".mpga": "audio/mpeg",
|
||||
".msh": "model/mesh",
|
||||
".nc": "application/x-netcdf",
|
||||
".oda": "application/oda",
|
||||
".ogv": "video/ogv",
|
||||
".pct": "image/pict",
|
||||
".pic": "image/pict",
|
||||
".pict": "image/pict",
|
||||
".pnt": "image/x-macpaint",
|
||||
".pntg": "image/x-macpaint",
|
||||
".ps": "application/postscript",
|
||||
".qt": "video/quicktime",
|
||||
".qti": "image/x-quicktime",
|
||||
".qtif": "image/x-quicktime",
|
||||
".ram": "audio/x-pn-realaudio",
|
||||
".rdf": "application/rdf+xml",
|
||||
".rm": "application/vnd.rn-realmedia",
|
||||
".roff": "application/x-troff",
|
||||
".sgm": "text/sgml",
|
||||
".sgml": "text/sgml",
|
||||
".silo": "model/mesh",
|
||||
".skd": "application/x-koan",
|
||||
".skm": "application/x-koan",
|
||||
".skp": "application/x-koan",
|
||||
".skt": "application/x-koan",
|
||||
".smi": "application/smil",
|
||||
".smil": "application/smil",
|
||||
".snd": "audio/basic",
|
||||
".so": "application/octet-stream",
|
||||
".svg": "image/svg+xml",
|
||||
".t": "application/x-troff",
|
||||
".texi": "application/x-texinfo",
|
||||
".texinfo": "application/x-texinfo",
|
||||
".tif": "image/tiff",
|
||||
".tiff": "image/tiff",
|
||||
".tr": "application/x-troff",
|
||||
".txt": "text/plain",
|
||||
".vrml": "model/vrml",
|
||||
".vxml": "application/voicexml+xml",
|
||||
".webm": "video/webm",
|
||||
".wrl": "model/vrml",
|
||||
".xht": "application/xhtml+xml",
|
||||
".xhtml": "application/xhtml+xml",
|
||||
".xml": "application/xml",
|
||||
".xsl": "application/xml",
|
||||
".xslt": "application/xslt+xml",
|
||||
".xul": "application/vnd.mozilla.xul+xml",
|
||||
".webp": "image/webp",
|
||||
".323": "text/h323",
|
||||
".aab": "application/x-authoware-bin",
|
||||
".aam": "application/x-authoware-map",
|
||||
".aas": "application/x-authoware-seg",
|
||||
".acx": "application/internet-property-stream",
|
||||
".als": "audio/X-Alpha5",
|
||||
".amc": "application/x-mpeg",
|
||||
".ani": "application/octet-stream",
|
||||
".asd": "application/astound",
|
||||
".asf": "video/x-ms-asf",
|
||||
".asn": "application/astound",
|
||||
".asp": "application/x-asap",
|
||||
".asr": "video/x-ms-asf",
|
||||
".asx": "video/x-ms-asf",
|
||||
".avb": "application/octet-stream",
|
||||
".awb": "audio/amr-wb",
|
||||
".axs": "application/olescript",
|
||||
".bas": "text/plain",
|
||||
".bin ": "application/octet-stream",
|
||||
".bld": "application/bld",
|
||||
".bld2": "application/bld2",
|
||||
".bpk": "application/octet-stream",
|
||||
".c": "text/plain",
|
||||
".cal": "image/x-cals",
|
||||
".cat": "application/vnd.ms-pkiseccat",
|
||||
".ccn": "application/x-cnc",
|
||||
".cco": "application/x-cocoa",
|
||||
".cer": "application/x-x509-ca-cert",
|
||||
".cgi": "magnus-internal/cgi",
|
||||
".chat": "application/x-chat",
|
||||
".clp": "application/x-msclip",
|
||||
".cmx": "image/x-cmx",
|
||||
".co": "application/x-cult3d-object",
|
||||
".cod": "image/cis-cod",
|
||||
".conf": "text/plain",
|
||||
".cpp": "text/plain",
|
||||
".crd": "application/x-mscardfile",
|
||||
".crl": "application/pkix-crl",
|
||||
".crt": "application/x-x509-ca-cert",
|
||||
".csm": "chemical/x-csml",
|
||||
".csml": "chemical/x-csml",
|
||||
".cur": "application/octet-stream",
|
||||
".dcm": "x-lml/x-evm",
|
||||
".dcx": "image/x-dcx",
|
||||
".der": "application/x-x509-ca-cert",
|
||||
".dhtml": "text/html",
|
||||
".dot": "application/msword",
|
||||
".dwf": "drawing/x-dwf",
|
||||
".dwg": "application/x-autocad",
|
||||
".dxf": "application/x-autocad",
|
||||
".ebk": "application/x-expandedbook",
|
||||
".emb": "chemical/x-embl-dl-nucleotide",
|
||||
".embl": "chemical/x-embl-dl-nucleotide",
|
||||
".epub": "application/epub+zip",
|
||||
".eri": "image/x-eri",
|
||||
".es": "audio/echospeech",
|
||||
".esl": "audio/echospeech",
|
||||
".etc": "application/x-earthtime",
|
||||
".evm": "x-lml/x-evm",
|
||||
".evy": "application/envoy",
|
||||
".fh4": "image/x-freehand",
|
||||
".fh5": "image/x-freehand",
|
||||
".fhc": "image/x-freehand",
|
||||
".fif": "application/fractals",
|
||||
".flr": "x-world/x-vrml",
|
||||
".fm": "application/x-maker",
|
||||
".fpx": "image/x-fpx",
|
||||
".fvi": "video/isivideo",
|
||||
".gau": "chemical/x-gaussian-input",
|
||||
".gca": "application/x-gca-compressed",
|
||||
".gdb": "x-lml/x-gdb",
|
||||
".gps": "application/x-gps",
|
||||
".h": "text/plain",
|
||||
".hdm": "text/x-hdml",
|
||||
".hdml": "text/x-hdml",
|
||||
".hlp": "application/winhlp",
|
||||
".hta": "application/hta",
|
||||
".htc": "text/x-component",
|
||||
".hts": "text/html",
|
||||
".htt": "text/webviewhtml",
|
||||
".ifm": "image/gif",
|
||||
".ifs": "image/ifs",
|
||||
".iii": "application/x-iphone",
|
||||
".imy": "audio/melody",
|
||||
".ins": "application/x-internet-signup",
|
||||
".ips": "application/x-ipscript",
|
||||
".ipx": "application/x-ipix",
|
||||
".isp": "application/x-internet-signup",
|
||||
".it": "audio/x-mod",
|
||||
".itz": "audio/x-mod",
|
||||
".ivr": "i-world/i-vrml",
|
||||
".j2k": "image/j2k",
|
||||
".jam": "application/x-jam",
|
||||
".java": "text/plain",
|
||||
".jfif": "image/pipeg",
|
||||
".jpz": "image/jpeg",
|
||||
".jwc": "application/jwc",
|
||||
".kjx": "application/x-kjx",
|
||||
".lak": "x-lml/x-lak",
|
||||
".lcc": "application/fastman",
|
||||
".lcl": "application/x-digitalloca",
|
||||
".lcr": "application/x-digitalloca",
|
||||
".lgh": "application/lgh",
|
||||
".lml": "x-lml/x-lml",
|
||||
".lmlpack": "x-lml/x-lmlpack",
|
||||
".log": "text/plain",
|
||||
".lsf": "video/x-la-asf",
|
||||
".lsx": "video/x-la-asf",
|
||||
".m13": "application/x-msmediaview",
|
||||
".m14": "application/x-msmediaview",
|
||||
".m15": "audio/x-mod",
|
||||
".m3url": "audio/x-mpegurl",
|
||||
".m4b": "audio/mp4a-latm",
|
||||
".ma1": "audio/ma1",
|
||||
".ma2": "audio/ma2",
|
||||
".ma3": "audio/ma3",
|
||||
".ma5": "audio/ma5",
|
||||
".map": "magnus-internal/imagemap",
|
||||
".mbd": "application/mbedlet",
|
||||
".mct": "application/x-mascot",
|
||||
".mdb": "application/x-msaccess",
|
||||
".mdz": "audio/x-mod",
|
||||
".mel": "text/x-vmel",
|
||||
".mht": "message/rfc822",
|
||||
".mhtml": "message/rfc822",
|
||||
".mi": "application/x-mif",
|
||||
".mil": "image/x-cals",
|
||||
".mio": "audio/x-mio",
|
||||
".mmf": "application/x-skt-lbs",
|
||||
".mng": "video/x-mng",
|
||||
".mny": "application/x-msmoney",
|
||||
".moc": "application/x-mocha",
|
||||
".mocha": "application/x-mocha",
|
||||
".mod": "audio/x-mod",
|
||||
".mof": "application/x-yumekara",
|
||||
".mol": "chemical/x-mdl-molfile",
|
||||
".mop": "chemical/x-mopac-input",
|
||||
".mpa": "video/mpeg",
|
||||
".mpc": "application/vnd.mpohun.certificate",
|
||||
".mpg4": "video/mp4",
|
||||
".mpn": "application/vnd.mophun.application",
|
||||
".mpp": "application/vnd.ms-project",
|
||||
".mps": "application/x-mapserver",
|
||||
".mpv2": "video/mpeg",
|
||||
".mrl": "text/x-mrml",
|
||||
".mrm": "application/x-mrm",
|
||||
".msg": "application/vnd.ms-outlook",
|
||||
".mts": "application/metastream",
|
||||
".mtx": "application/metastream",
|
||||
".mtz": "application/metastream",
|
||||
".mvb": "application/x-msmediaview",
|
||||
".mzv": "application/metastream",
|
||||
".nar": "application/zip",
|
||||
".nbmp": "image/nbmp",
|
||||
".ndb": "x-lml/x-ndb",
|
||||
".ndwn": "application/ndwn",
|
||||
".nif": "application/x-nif",
|
||||
".nmz": "application/x-scream",
|
||||
".nokia-op-logo": "image/vnd.nok-oplogo-color",
|
||||
".npx": "application/x-netfpx",
|
||||
".nsnd": "audio/nsnd",
|
||||
".nva": "application/x-neva1",
|
||||
".nws": "message/rfc822",
|
||||
".oom": "application/x-AtlasMate-Plugin",
|
||||
".p10": "application/pkcs10",
|
||||
".p12": "application/x-pkcs12",
|
||||
".p7b": "application/x-pkcs7-certificates",
|
||||
".p7c": "application/x-pkcs7-mime",
|
||||
".p7m": "application/x-pkcs7-mime",
|
||||
".p7r": "application/x-pkcs7-certreqresp",
|
||||
".p7s": "application/x-pkcs7-signature",
|
||||
".pac": "audio/x-pac",
|
||||
".pae": "audio/x-epac",
|
||||
".pan": "application/x-pan",
|
||||
".pcx": "image/x-pcx",
|
||||
".pda": "image/x-pda",
|
||||
".pfr": "application/font-tdpfr",
|
||||
".pfx": "application/x-pkcs12",
|
||||
".pko": "application/ynd.ms-pkipko",
|
||||
".pm": "application/x-perl",
|
||||
".pma": "application/x-perfmon",
|
||||
".pmc": "application/x-perfmon",
|
||||
".pmd": "application/x-pmd",
|
||||
".pml": "application/x-perfmon",
|
||||
".pmr": "application/x-perfmon",
|
||||
".pmw": "application/x-perfmon",
|
||||
".pnz": "image/png",
|
||||
".pot,": "application/vnd.ms-powerpoint",
|
||||
".pps": "application/vnd.ms-powerpoint",
|
||||
".pqf": "application/x-cprplayer",
|
||||
".pqi": "application/cprplayer",
|
||||
".prc": "application/x-prc",
|
||||
".prf": "application/pics-rules",
|
||||
".prop": "text/plain",
|
||||
".proxy": "application/x-ns-proxy-autoconfig",
|
||||
".ptlk": "application/listenup",
|
||||
".pub": "application/x-mspublisher",
|
||||
".pvx": "video/x-pv-pvx",
|
||||
".qcp": "audio/vnd.qcelp",
|
||||
".r3t": "text/vnd.rn-realtext3d",
|
||||
".rar": "application/octet-stream",
|
||||
".rc": "text/plain",
|
||||
".rf": "image/vnd.rn-realflash",
|
||||
".rlf": "application/x-richlink",
|
||||
".rmf": "audio/x-rmf",
|
||||
".rmi": "audio/mid",
|
||||
".rmm": "audio/x-pn-realaudio",
|
||||
".rmvb": "audio/x-pn-realaudio",
|
||||
".rnx": "application/vnd.rn-realplayer",
|
||||
".rp": "image/vnd.rn-realpix",
|
||||
".rt": "text/vnd.rn-realtext",
|
||||
".rte": "x-lml/x-gps",
|
||||
".rtg": "application/metastream",
|
||||
".rv": "video/vnd.rn-realvideo",
|
||||
".rwc": "application/x-rogerwilco",
|
||||
".s3m": "audio/x-mod",
|
||||
".s3z": "audio/x-mod",
|
||||
".sca": "application/x-supercard",
|
||||
".scd": "application/x-msschedule",
|
||||
".sct": "text/scriptlet",
|
||||
".sdf": "application/e-score",
|
||||
".sea": "application/x-stuffit",
|
||||
".setpay": "application/set-payment-initiation",
|
||||
".setreg": "application/set-registration-initiation",
|
||||
".shtml": "text/html",
|
||||
".shtm": "text/html",
|
||||
".shw": "application/presentations",
|
||||
".si6": "image/si6",
|
||||
".si7": "image/vnd.stiwap.sis",
|
||||
".si9": "image/vnd.lgtwap.sis",
|
||||
".slc": "application/x-salsa",
|
||||
".smd": "audio/x-smd",
|
||||
".smp": "application/studiom",
|
||||
".smz": "audio/x-smd",
|
||||
".spc": "application/x-pkcs7-certificates",
|
||||
".spr": "application/x-sprite",
|
||||
".sprite": "application/x-sprite",
|
||||
".sdp": "application/sdp",
|
||||
".spt": "application/x-spt",
|
||||
".sst": "application/vnd.ms-pkicertstore",
|
||||
".stk": "application/hyperstudio",
|
||||
".stl": "application/vnd.ms-pkistl",
|
||||
".stm": "text/html",
|
||||
".svf": "image/vnd",
|
||||
".svh": "image/svh",
|
||||
".svr": "x-world/x-svr",
|
||||
".swfl": "application/x-shockwave-flash",
|
||||
".tad": "application/octet-stream",
|
||||
".talk": "text/x-speech",
|
||||
".taz": "application/x-tar",
|
||||
".tbp": "application/x-timbuktu",
|
||||
".tbt": "application/x-timbuktu",
|
||||
".tgz": "application/x-compressed",
|
||||
".thm": "application/vnd.eri.thm",
|
||||
".tki": "application/x-tkined",
|
||||
".tkined": "application/x-tkined",
|
||||
".toc": "application/toc",
|
||||
".toy": "image/toy",
|
||||
".trk": "x-lml/x-gps",
|
||||
".trm": "application/x-msterminal",
|
||||
".tsi": "audio/tsplayer",
|
||||
".tsp": "application/dsptype",
|
||||
".ttf": "application/octet-stream",
|
||||
".ttz": "application/t-time",
|
||||
".uls": "text/iuls",
|
||||
".ult": "audio/x-mod",
|
||||
".uu": "application/x-uuencode",
|
||||
".uue": "application/x-uuencode",
|
||||
".vcf": "text/x-vcard",
|
||||
".vdo": "video/vdo",
|
||||
".vib": "audio/vib",
|
||||
".viv": "video/vivo",
|
||||
".vivo": "video/vivo",
|
||||
".vmd": "application/vocaltec-media-desc",
|
||||
".vmf": "application/vocaltec-media-file",
|
||||
".vmi": "application/x-dreamcast-vms-info",
|
||||
".vms": "application/x-dreamcast-vms",
|
||||
".vox": "audio/voxware",
|
||||
".vqe": "audio/x-twinvq-plugin",
|
||||
".vqf": "audio/x-twinvq",
|
||||
".vql": "audio/x-twinvq",
|
||||
".vre": "x-world/x-vream",
|
||||
".vrt": "x-world/x-vrt",
|
||||
".vrw": "x-world/x-vream",
|
||||
".vts": "workbook/formulaone",
|
||||
".wcm": "application/vnd.ms-works",
|
||||
".wdb": "application/vnd.ms-works",
|
||||
".web": "application/vnd.xara",
|
||||
".wi": "image/wavelet",
|
||||
".wis": "application/x-InstallShield",
|
||||
".wks": "application/vnd.ms-works",
|
||||
".wmd": "application/x-ms-wmd",
|
||||
".wmf": "application/x-msmetafile",
|
||||
".wmlscript": "text/vnd.wap.wmlscript",
|
||||
".wmz": "application/x-ms-wmz",
|
||||
".wpng": "image/x-up-wpng",
|
||||
".wps": "application/vnd.ms-works",
|
||||
".wpt": "x-lml/x-gps",
|
||||
".wri": "application/x-mswrite",
|
||||
".wrz": "x-world/x-vrml",
|
||||
".ws": "text/vnd.wap.wmlscript",
|
||||
".wsc": "application/vnd.wap.wmlscriptc",
|
||||
".wv": "video/wavelet",
|
||||
".wxl": "application/x-wxl",
|
||||
".x-gzip": "application/x-gzip",
|
||||
".xaf": "x-world/x-vrml",
|
||||
".xar": "application/vnd.xara",
|
||||
".xdm": "application/x-xdma",
|
||||
".xdma": "application/x-xdma",
|
||||
".xdw": "application/vnd.fujixerox.docuworks",
|
||||
".xhtm": "application/xhtml+xml",
|
||||
".xla": "application/vnd.ms-excel",
|
||||
".xlc": "application/vnd.ms-excel",
|
||||
".xll": "application/x-excel",
|
||||
".xlm": "application/vnd.ms-excel",
|
||||
".xlt": "application/vnd.ms-excel",
|
||||
".xlw": "application/vnd.ms-excel",
|
||||
".xm": "audio/x-mod",
|
||||
".xmz": "audio/x-mod",
|
||||
".xof": "x-world/x-vrml",
|
||||
".xpi": "application/x-xpinstall",
|
||||
".xsit": "text/xml",
|
||||
".yz1": "application/x-yz1",
|
||||
".z": "application/x-compress",
|
||||
".zac": "application/x-zaurus-zac",
|
||||
".json": "application/json",
|
||||
}
|
||||
|
||||
// TypeByExtension returns the MIME type associated with the file extension ext.
|
||||
// gets the file's MIME type for HTTP header Content-Type
|
||||
func TypeByExtension(filePath string) string {
|
||||
typ := mime.TypeByExtension(path.Ext(filePath))
|
||||
if typ == "" {
|
||||
typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
|
||||
}
|
||||
return typ
|
||||
}
|
@ -0,0 +1,69 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Response defines HTTP response from OSS
type Response struct {
	StatusCode int           // HTTP status code of the response
	Headers    http.Header   // response headers
	Body       io.ReadCloser // response body; the consumer must close it
	ClientCRC  uint64        // CRC computed on the client side — presumably CRC64-ECMA over transferred data; confirm against the CRC helpers
	ServerCRC  uint64        // CRC reported by the server — NOTE(review): likely from the x-oss-hash-crc64ecma header; verify
}
|
||||
|
||||
// Read reads up to len(p) bytes from the response body, making Response
// satisfy io.Reader by delegating to Body.
func (r *Response) Read(p []byte) (n int, err error) {
	return r.Body.Read(p)
}
|
||||
|
||||
// Close closes the HTTP response body, making Response satisfy io.Closer.
func (r *Response) Close() error {
	return r.Body.Close()
}
|
||||
|
||||
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
	ObjectKey string    // key of the object to write
	Reader    io.Reader // source of the object's data
}
|
||||
|
||||
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
	ObjectKey string // key of the object to read
}
|
||||
|
||||
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
	Response  *Response   // raw HTTP response wrapper
	ClientCRC hash.Hash64 // running client-side CRC of the downloaded data
	ServerCRC uint64      // server-reported CRC for verification
}
|
||||
|
||||
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
	ObjectKey string    // key of the appendable object
	Reader    io.Reader // source of the data to append
	Position  int64     // byte offset at which the append starts
}
|
||||
|
||||
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
	NextPosition int64  // offset to use for the next append
	CRC          uint64 // CRC of the object after the append
}
|
||||
|
||||
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
	InitResult *InitiateMultipartUploadResult // handle of the multipart upload the part belongs to
	Reader     io.Reader                      // source of the part's data
	PartSize   int64                          // number of bytes to read for this part
	PartNumber int                            // 1-based part number
}
|
||||
|
||||
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
	Part UploadPart // the uploaded part (number and ETag)
}
|
@ -0,0 +1,474 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// CopyFile is multipart copy object
|
||||
//
|
||||
// srcBucketName source bucket name
|
||||
// srcObjectKey source object name
|
||||
// destObjectKey target object name in the form of bucketname.objectkey
|
||||
// partSize the part size in byte.
|
||||
// options object's contraints. Check out function InitiateMultipartUpload.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
|
||||
destBucketName := bucket.BucketName
|
||||
if partSize < MinPartSize || partSize > MaxPartSize {
|
||||
return errors.New("oss: part size invalid range (1024KB, 5GB]")
|
||||
}
|
||||
|
||||
cpConf := getCpConfig(options)
|
||||
routines := getRoutines(options)
|
||||
|
||||
var strVersionId string
|
||||
versionId, _ := FindOption(options, "versionId", nil)
|
||||
if versionId != nil {
|
||||
strVersionId = versionId.(string)
|
||||
}
|
||||
|
||||
if cpConf != nil && cpConf.IsEnable {
|
||||
cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
|
||||
if cpFilePath != "" {
|
||||
return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
|
||||
partSize, options, routines)
|
||||
}
|
||||
|
||||
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
|
||||
cpFileName := getCpFileName(src, dest, versionId)
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- Concurrently copy without checkpoint ---------
|
||||
|
||||
// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
	bucket        *Bucket                       // destination bucket the parts are copied into
	imur          InitiateMultipartUploadResult // handle of the in-progress multipart upload
	srcBucketName string                        // bucket holding the source object
	srcObjectKey  string                        // key of the source object
	options       []Option                      // per-part request options
	hook          copyPartHook                  // test hook invoked before each part copy
}
|
||||
|
||||
// copyPartHook is the hook for testing purpose; it runs before each part copy
// and may return an error to simulate a failure.
type copyPartHook func(part copyPart) error

// copyPartHooker is the active hook; tests swap it to inject failures.
var copyPartHooker copyPartHook = defaultCopyPartHook

// defaultCopyPartHook is a no-op: production copies always proceed.
func defaultCopyPartHook(part copyPart) error {
	return nil
}
|
||||
|
||||
// copyWorker copies worker: consumes parts from jobs, performs UploadPartCopy
// for each, and publishes either the copied part or the error.
//
// id      worker identifier (not used in the body; presumably for debugging)
// jobs    parts to copy; the worker exits when the channel is closed
// results receives each successfully copied part
// failed  receives the error that stopped this worker
// die     cancellation signal; a successful receive makes the worker return
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		// Test hook: may inject a failure for this part.
		if err := arg.hook(chunk); err != nil {
			failed <- err
			break
		}
		// Part range is inclusive on both ends.
		chunkSize := chunk.End - chunk.Start + 1
		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
			chunk.Start, chunkSize, chunk.Number, arg.options...)
		if err != nil {
			failed <- err
			break
		}
		// Check for cancellation before publishing, so a coordinator that has
		// stopped reading results does not block this worker forever.
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}
|
||||
|
||||
// copyScheduler
|
||||
func copyScheduler(jobs chan copyPart, parts []copyPart) {
|
||||
for _, part := range parts {
|
||||
jobs <- part
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
// copyPart describes one contiguous byte range of the source object to copy.
type copyPart struct {
	Number int   // Part number (from 1 to 10,000)
	Start  int64 // The start index in the source file (inclusive).
	End    int64 // The end index in the source file (inclusive).
}
|
||||
|
||||
// getCopyParts calculates copy parts
|
||||
func getCopyParts(objectSize, partSize int64) []copyPart {
|
||||
parts := []copyPart{}
|
||||
part := copyPart{}
|
||||
i := 0
|
||||
for offset := int64(0); offset < objectSize; offset += partSize {
|
||||
part.Number = i + 1
|
||||
part.Start = offset
|
||||
part.End = GetPartEnd(offset, objectSize, partSize)
|
||||
parts = append(parts, part)
|
||||
i++
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
// getSrcObjectBytes gets the source file size
|
||||
func getSrcObjectBytes(parts []copyPart) int64 {
|
||||
var ob int64
|
||||
for _, part := range parts {
|
||||
ob += (part.End - part.Start + 1)
|
||||
}
|
||||
return ob
|
||||
}
|
||||
|
||||
// copyFile is a concurrently copy without checkpoint
|
||||
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
abortOptions := ChoiceAbortPartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get copy parts
|
||||
parts := getCopyParts(objectSize, partSize)
|
||||
// Initialize the multipart upload
|
||||
imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
var completedBytes int64
|
||||
totalBytes := getSrcObjectBytes(parts)
|
||||
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start to copy workers
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts finished.
|
||||
completed := 0
|
||||
ups := make([]UploadPart, len(parts))
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ups[part.PartNumber-1] = part
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
descBucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Complete the multipart upload
|
||||
_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
|
||||
if err != nil {
|
||||
bucket.AbortMultipartUpload(imur, abortOptions...)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----- Concurrently copy with checkpoint -----
|
||||
|
||||
// copyCpMagic identifies a copy-checkpoint file; isValid compares it.
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
|
||||
|
||||
// copyCheckpoint persists the state of a resumable copy so an interrupted
// copy can continue from where it stopped.
type copyCheckpoint struct {
	Magic          string       // Magic number identifying a copy checkpoint file
	MD5            string       // CP content MD5 (computed with this field blanked)
	SrcBucketName  string       // Source bucket
	SrcObjectKey   string       // Source object
	DestBucketName string       // Target bucket
	DestObjectKey  string       // Target object
	CopyID         string       // Copy (multipart upload) ID on the destination
	ObjStat        objectStat   // Source object stat, used to detect source changes
	Parts          []copyPart   // All copy parts, indexed by part number - 1
	CopyParts      []UploadPart // The uploaded parts, indexed by part number - 1
	PartStat       []bool       // Whether each part has finished, indexed by part number - 1
}
|
||||
|
||||
// isValid checks if the data is valid which means CP is valid and object is not updated.
|
||||
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
|
||||
// Compare CP's magic number and the MD5.
|
||||
cpb := cp
|
||||
cpb.MD5 = ""
|
||||
js, _ := json.Marshal(cpb)
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
|
||||
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compare the object size and last modified time and etag.
|
||||
if cp.ObjStat.Size != objectSize ||
|
||||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
|
||||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load loads from the checkpoint file
|
||||
func (cp *copyCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// update records a finished part's result and marks it as completed.
func (cp *copyCheckpoint) update(part UploadPart) {
	cp.CopyParts[part.PartNumber-1] = part
	cp.PartStat[part.PartNumber-1] = true
}
|
||||
|
||||
// dump dumps the CP to the file
|
||||
func (cp *copyCheckpoint) dump(filePath string) error {
|
||||
bcp := *cp
|
||||
|
||||
// Calculate MD5
|
||||
bcp.MD5 = ""
|
||||
js, err := json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sum := md5.Sum(js)
|
||||
b64 := base64.StdEncoding.EncodeToString(sum[:])
|
||||
bcp.MD5 = b64
|
||||
|
||||
// Serialization
|
||||
js, err = json.Marshal(bcp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dump
|
||||
return ioutil.WriteFile(filePath, js, FilePermMode)
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp copyCheckpoint) todoParts() []copyPart {
|
||||
dps := []copyPart{}
|
||||
for i, ps := range cp.PartStat {
|
||||
if !ps {
|
||||
dps = append(dps, cp.Parts[i])
|
||||
}
|
||||
}
|
||||
return dps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns finished bytes count
|
||||
func (cp copyCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for i, part := range cp.Parts {
|
||||
if cp.PartStat[i] {
|
||||
completedBytes += (part.End - part.Start + 1)
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// prepare initializes the multipart upload
|
||||
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
|
||||
partSize int64, options []Option) error {
|
||||
// CP
|
||||
cp.Magic = copyCpMagic
|
||||
cp.SrcBucketName = srcBucket.BucketName
|
||||
cp.SrcObjectKey = srcObjectKey
|
||||
cp.DestBucketName = destBucket.BucketName
|
||||
cp.DestObjectKey = destObjectKey
|
||||
|
||||
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cp.ObjStat.Size = objectSize
|
||||
cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
|
||||
cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
|
||||
|
||||
// Parts
|
||||
cp.Parts = getCopyParts(objectSize, partSize)
|
||||
cp.PartStat = make([]bool, len(cp.Parts))
|
||||
for i := range cp.PartStat {
|
||||
cp.PartStat[i] = false
|
||||
}
|
||||
cp.CopyParts = make([]UploadPart, len(cp.Parts))
|
||||
|
||||
// Init copy
|
||||
imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cp.CopyID = imur.UploadID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
|
||||
imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
|
||||
Key: cp.DestObjectKey, UploadID: cp.CopyID}
|
||||
_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
return err
|
||||
}
|
||||
|
||||
// copyFileWithCp is concurrently copy with checkpoint
|
||||
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
|
||||
partSize int64, options []Option, cpFilePath string, routines int) error {
|
||||
descBucket, err := bucket.Client.Bucket(destBucketName)
|
||||
srcBucket, err := bucket.Client.Bucket(srcBucketName)
|
||||
listener := GetProgressListener(options)
|
||||
|
||||
// Load CP data
|
||||
ccp := copyCheckpoint{}
|
||||
err = ccp.load(cpFilePath)
|
||||
if err != nil {
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// choice valid options
|
||||
headerOptions := ChoiceHeadObjectOption(options)
|
||||
partOptions := ChoiceTransferPartOption(options)
|
||||
completeOptions := ChoiceCompletePartOption(options)
|
||||
|
||||
meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load error or the CP data is invalid---reinitialize
|
||||
valid, err := ccp.isValid(meta)
|
||||
if err != nil || !valid {
|
||||
if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
|
||||
return err
|
||||
}
|
||||
os.Remove(cpFilePath)
|
||||
}
|
||||
|
||||
// Unfinished parts
|
||||
parts := ccp.todoParts()
|
||||
imur := InitiateMultipartUploadResult{
|
||||
Bucket: destBucketName,
|
||||
Key: destObjectKey,
|
||||
UploadID: ccp.CopyID}
|
||||
|
||||
jobs := make(chan copyPart, len(parts))
|
||||
results := make(chan UploadPart, len(parts))
|
||||
failed := make(chan error)
|
||||
die := make(chan bool)
|
||||
|
||||
completedBytes := ccp.getCompletedBytes()
|
||||
event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
// Start the worker coroutines
|
||||
arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
|
||||
for w := 1; w <= routines; w++ {
|
||||
go copyWorker(w, arg, jobs, results, failed, die)
|
||||
}
|
||||
|
||||
// Start the scheduler
|
||||
go copyScheduler(jobs, parts)
|
||||
|
||||
// Wait for the parts completed.
|
||||
completed := 0
|
||||
for completed < len(parts) {
|
||||
select {
|
||||
case part := <-results:
|
||||
completed++
|
||||
ccp.update(part)
|
||||
ccp.dump(cpFilePath)
|
||||
copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
|
||||
completedBytes += copyBytes
|
||||
event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
|
||||
publishProgress(listener, event)
|
||||
case err := <-failed:
|
||||
close(die)
|
||||
event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
return err
|
||||
}
|
||||
|
||||
if completed >= len(parts) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
|
||||
publishProgress(listener, event)
|
||||
|
||||
return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
|
||||
}
|
@ -0,0 +1,305 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// InitiateMultipartUpload initializes multipart upload
//
// objectKey    object name
// options    the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
//            ServerSideEncryption, Meta, check out the following link:
//            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult    the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
	var imur InitiateMultipartUploadResult
	// Default the Content-Type from the object key's extension.
	opts := AddContentType(options, objectKey)
	params, _ := GetRawParams(options)
	// Drop these parameters when the caller left them empty so they are not
	// sent as bare query flags.
	paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
	ConvertEmptyValueToNil(params, paramKeys)
	// "uploads" (no value) selects the InitiateMultipartUpload operation.
	params["uploads"] = nil

	resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
	if err != nil {
		return imur, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &imur)
	return imur, err
}
|
||||
|
||||
// UploadPart uploads parts
|
||||
//
|
||||
// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
|
||||
// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
|
||||
// And thus with the same part number and upload Id, another part upload will overwrite the data.
|
||||
// Except the last one, minimal part size is 100KB. There's no limit on the last part size.
|
||||
//
|
||||
// imur the returned value of InitiateMultipartUpload.
|
||||
// reader io.Reader the reader for the part's data.
|
||||
// size the part size.
|
||||
// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
|
||||
//
|
||||
// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
|
||||
partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
request := &UploadPartRequest{
|
||||
InitResult: &imur,
|
||||
Reader: reader,
|
||||
PartSize: partSize,
|
||||
PartNumber: partNumber,
|
||||
}
|
||||
|
||||
result, err := bucket.DoUploadPart(request, options)
|
||||
|
||||
return result.Part, err
|
||||
}
|
||||
|
||||
// UploadPartFromFile uploads part from the file.
|
||||
//
|
||||
// imur the return value of a successful InitiateMultipartUpload.
|
||||
// filePath the local file path to upload.
|
||||
// startPosition the start position in the local file.
|
||||
// partSize the part size.
|
||||
// partNumber the part number (from 1 to 10,000)
|
||||
//
|
||||
// UploadPart the return value consists of PartNumber and ETag.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
|
||||
startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
|
||||
var part = UploadPart{}
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return part, err
|
||||
}
|
||||
defer fd.Close()
|
||||
fd.Seek(startPosition, os.SEEK_SET)
|
||||
|
||||
request := &UploadPartRequest{
|
||||
InitResult: &imur,
|
||||
Reader: fd,
|
||||
PartSize: partSize,
|
||||
PartNumber: partNumber,
|
||||
}
|
||||
|
||||
result, err := bucket.DoUploadPart(request, options)
|
||||
|
||||
return result.Part, err
|
||||
}
|
||||
|
||||
// DoUploadPart does the actual part upload.
//
// request    part upload request
//
// UploadPartResult    the result of uploading part.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
	listener := GetProgressListener(options)
	// State the body length explicitly so the request is not sent chunked.
	options = append(options, ContentLength(request.PartSize))
	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(request.PartNumber)
	params["uploadId"] = request.InitResult.UploadID
	// LimitedReader caps the upload at exactly PartSize bytes of the reader.
	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
	if err != nil {
		return &UploadPartResult{}, err
	}
	defer resp.Body.Close()

	// The part's ETag is returned in the response header, not the body.
	part := UploadPart{
		ETag:       resp.Headers.Get(HTTPHeaderEtag),
		PartNumber: request.PartNumber,
	}

	// When CRC checking is enabled on the client, verify the part's CRC;
	// the part data is still returned alongside the error.
	if bucket.GetConfig().IsEnableCRC {
		err = CheckCRC(resp, "DoUploadPart")
		if err != nil {
			return &UploadPartResult{part}, err
		}
	}

	return &UploadPartResult{part}, nil
}
|
||||
|
||||
// UploadPartCopy uploads part copy
//
// imur    the return value of InitiateMultipartUpload
// copySrc    source Object name
// startPosition    the part's start index in the source file
// partSize    the part size
// partNumber    the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
// options    the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error.
//            CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
//            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart    the return value consists of PartNumber and ETag.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var out UploadPartCopyResult
	var part UploadPart
	var opts []Option

	// If a versionId option is present, copy from that specific source
	// version and strip the option so it is not also sent as a parameter.
	versionIdKey := "versionId"
	versionId, _ := FindOption(options, versionIdKey, nil)
	if versionId == nil {
		opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
			CopySourceRange(startPosition, partSize)}
	} else {
		opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
			CopySourceRange(startPosition, partSize)}
		options = DeleteOption(options, versionIdKey)
	}

	// Caller options follow the copy-source options.
	opts = append(opts, options...)

	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(partNumber)
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
	if err != nil {
		return part, err
	}
	defer resp.Body.Close()

	// The copied part's ETag comes back in the XML body.
	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return part, err
	}
	part.ETag = out.ETag
	part.PartNumber = partNumber

	return part, nil
}
|
||||
|
||||
// CompleteMultipartUpload completes the multipart upload.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
|
||||
//
|
||||
// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
|
||||
parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
|
||||
var out CompleteMultipartUploadResult
|
||||
|
||||
sort.Sort(UploadParts(parts))
|
||||
cxml := completeMultipartUploadXML{}
|
||||
cxml.Part = parts
|
||||
bs, err := xml.Marshal(cxml)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// AbortMultipartUpload aborts the multipart upload.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
//
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
|
||||
params := map[string]interface{}{}
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// ListUploadedParts lists the uploaded parts.
|
||||
//
|
||||
// imur the return value of InitiateMultipartUpload.
|
||||
//
|
||||
// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
|
||||
var out ListUploadedPartsResult
|
||||
options = append(options, EncodingType("url"))
|
||||
|
||||
params := map[string]interface{}{}
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
params["uploadId"] = imur.UploadID
|
||||
resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListUploadedPartsResult(&out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// ListMultipartUploads lists all ongoing multipart upload tasks
|
||||
//
|
||||
// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
|
||||
// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
|
||||
//
|
||||
// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil.
|
||||
// error it's nil if the operation succeeds, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
|
||||
var out ListMultipartUploadResult
|
||||
|
||||
options = append(options, EncodingType("url"))
|
||||
params, err := GetRawParams(options)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
params["uploads"] = nil
|
||||
|
||||
resp, err := bucket.do("GET", "", params, options, nil, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.Body, &out)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = decodeListMultipartUploadResult(&out)
|
||||
return out, err
|
||||
}
|
@ -0,0 +1,689 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// optionType classifies where an option's value is applied.
type optionType string

const (
	optionParam optionType = "HTTPParameter" // URL parameter
	optionHTTP  optionType = "HTTPHeader"    // HTTP header
	optionArg   optionType = "FuncArgument"  // Function argument
)

// Well-known keys for function-argument options.
const (
	deleteObjectsQuiet = "delete-objects-quiet"
	routineNum         = "x-routine-num"
	checkpointConfig   = "x-cp-config"
	initCRC64          = "init-crc64"
	progressListener   = "x-progress-listener"
	storageClass       = "storage-class"
	responseHeader     = "x-response-header"
	redundancyType     = "redundancy-type"
	objectHashFunc     = "object-hash-func"
)

type (
	// optionValue pairs an option's value with where it should be applied.
	optionValue struct {
		Value interface{}
		Type  optionType
	}

	// Option is an HTTP option: it records its value into the given option map.
	Option func(map[string]optionValue) error
)
|
||||
|
||||
// ACL is an option to set X-Oss-Acl header
func ACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssACL, string(acl))
}

// ContentType is an option to set Content-Type header
func ContentType(value string) Option {
	return setHeader(HTTPHeaderContentType, value)
}

// ContentLength is an option to set Content-Length header
func ContentLength(length int64) Option {
	return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}

// CacheControl is an option to set Cache-Control header
func CacheControl(value string) Option {
	return setHeader(HTTPHeaderCacheControl, value)
}

// ContentDisposition is an option to set Content-Disposition header
func ContentDisposition(value string) Option {
	return setHeader(HTTPHeaderContentDisposition, value)
}

// ContentEncoding is an option to set Content-Encoding header
func ContentEncoding(value string) Option {
	return setHeader(HTTPHeaderContentEncoding, value)
}

// ContentLanguage is an option to set Content-Language header
func ContentLanguage(value string) Option {
	return setHeader(HTTPHeaderContentLanguage, value)
}

// ContentMD5 is an option to set Content-MD5 header
func ContentMD5(value string) Option {
	return setHeader(HTTPHeaderContentMD5, value)
}

// Expires is an option to set Expires header
func Expires(t time.Time) Option {
	return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}

// Meta is an option to set a user metadata header (X-Oss-Meta-<key>)
func Meta(key, value string) Option {
	return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}

// Range is an option to set Range header, [start, end] (both bounds inclusive)
func Range(start, end int64) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}
|
||||
|
||||
// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
func NormalizedRange(nr string) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}

// AcceptEncoding is an option to set Accept-Encoding header
func AcceptEncoding(value string) Option {
	return setHeader(HTTPHeaderAcceptEncoding, value)
}

// IfModifiedSince is an option to set If-Modified-Since header
func IfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}

// IfUnmodifiedSince is an option to set If-Unmodified-Since header
func IfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// IfMatch is an option to set If-Match header
func IfMatch(value string) Option {
	return setHeader(HTTPHeaderIfMatch, value)
}

// IfNoneMatch is an option to set If-None-Match header
func IfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderIfNoneMatch, value)
}

// CopySource is an option to set X-Oss-Copy-Source header
func CopySource(sourceBucket, sourceObject string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}

// CopySourceVersion is an option to set X-Oss-Copy-Source header, including a versionId
func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
}

// CopySourceRange is an option to set X-Oss-Copy-Source-Range header
// (inclusive byte range of length partSize starting at startPosition)
func CopySourceRange(startPosition, partSize int64) Option {
	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
		strconv.FormatInt((startPosition+partSize-1), 10)
	return setHeader(HTTPHeaderOssCopySourceRange, val)
}

// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
func CopySourceIfMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}

// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
func CopySourceIfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}

// CopySourceIfModifiedSince is an option to set X-Oss-Copy-Source-If-Modified-Since header
func CopySourceIfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}

// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
func CopySourceIfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// MetadataDirective is an option to set X-Oss-Metadata-Directive header
func MetadataDirective(directive MetadataDirectiveType) Option {
	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}
|
||||
|
||||
// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
func ServerSideEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryption, value)
}

// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
func ServerSideEncryptionKeyID(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}

// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header
func ServerSideDataEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
}

// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header
func SSECAlgorithm(value string) Option {
	return setHeader(HTTPHeaderSSECAlgorithm, value)
}

// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header
func SSECKey(value string) Option {
	return setHeader(HTTPHeaderSSECKey, value)
}

// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header
func SSECKeyMd5(value string) Option {
	return setHeader(HTTPHeaderSSECKeyMd5, value)
}

// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssObjectACL, string(acl))
}

// symlinkTarget is an option to set X-Oss-Symlink-Target header
func symlinkTarget(targetObjectKey string) Option {
	return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}

// Origin is an option to set Origin header
func Origin(value string) Option {
	return setHeader(HTTPHeaderOrigin, value)
}

// ObjectStorageClass is an option to set the storage class of object
func ObjectStorageClass(storageClass StorageClassType) Option {
	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
}

// Callback is an option to set callback values
func Callback(callback string) Option {
	return setHeader(HTTPHeaderOssCallback, callback)
}

// CallbackVar is an option to set callback user defined values
func CallbackVar(callbackVar string) Option {
	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
}

// RequestPayer is an option to set (as a request header) the payer who pays for the request
func RequestPayer(payerType PayerType) Option {
	return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
}

// RequestPayerParam is an option to set (as a URL parameter) the payer who pays for the request
func RequestPayerParam(payerType PayerType) Option {
	return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
}
|
||||
|
||||
// SetTagging is an option to set object tagging
|
||||
func SetTagging(tagging Tagging) Option {
|
||||
if len(tagging.Tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
taggingValue := ""
|
||||
for index, tag := range tagging.Tags {
|
||||
if index != 0 {
|
||||
taggingValue += "&"
|
||||
}
|
||||
taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
|
||||
}
|
||||
return setHeader(HTTPHeaderOssTagging, taggingValue)
|
||||
}
|
||||
|
||||
// TaggingDirective is an option to set the X-Oss-Tagging-Directive header.
func TaggingDirective(directive TaggingDirectiveType) Option {
	return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
}
|
||||
|
||||
// ACReqMethod is an option to set the Access-Control-Request-Method header.
func ACReqMethod(value string) Option {
	return setHeader(HTTPHeaderACReqMethod, value)
}

// ACReqHeaders is an option to set the Access-Control-Request-Headers header.
func ACReqHeaders(value string) Option {
	return setHeader(HTTPHeaderACReqHeaders, value)
}

// TrafficLimitHeader is an option to set the X-Oss-Traffic-Limit header (bits per second).
func TrafficLimitHeader(value int64) Option {
	return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
}

// UserAgentHeader is an option to set the User-Agent header.
func UserAgentHeader(ua string) Option {
	return setHeader(HTTPHeaderUserAgent, ua)
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func ForbidOverWrite(forbidWrite bool) Option {
|
||||
if forbidWrite {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderOssForbidOverWrite, "false")
|
||||
}
|
||||
}
|
||||
|
||||
// RangeBehavior is an option to set the X-Oss-Range-Behavior header, such as "standard".
func RangeBehavior(value string) Option {
	return setHeader(HTTPHeaderOssRangeBehavior, value)
}

// PartHashCtxHeader is an option to set the X-Oss-Hash-Ctx header.
func PartHashCtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssHashCtx, value)
}

// PartMd5CtxHeader is an option to set the X-Oss-Md5-Ctx header.
func PartMd5CtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssMd5Ctx, value)
}

// PartHashCtxParam is an option to set the x-oss-hash-ctx URL parameter.
func PartHashCtxParam(value string) Option {
	return addParam("x-oss-hash-ctx", value)
}

// PartMd5CtxParam is an option to set the x-oss-md5-ctx URL parameter.
func PartMd5CtxParam(value string) Option {
	return addParam("x-oss-md5-ctx", value)
}
|
||||
|
||||
// Delimiter is an option to set the delimiter parameter.
func Delimiter(value string) Option {
	return addParam("delimiter", value)
}

// Marker is an option to set the marker parameter.
func Marker(value string) Option {
	return addParam("marker", value)
}

// MaxKeys is an option to set the max-keys parameter.
func MaxKeys(value int) Option {
	return addParam("max-keys", strconv.Itoa(value))
}

// Prefix is an option to set the prefix parameter.
func Prefix(value string) Option {
	return addParam("prefix", value)
}

// EncodingType is an option to set the encoding-type parameter.
func EncodingType(value string) Option {
	return addParam("encoding-type", value)
}

// MaxUploads is an option to set the max-uploads parameter.
func MaxUploads(value int) Option {
	return addParam("max-uploads", strconv.Itoa(value))
}

// KeyMarker is an option to set the key-marker parameter.
func KeyMarker(value string) Option {
	return addParam("key-marker", value)
}

// VersionIdMarker is an option to set the version-id-marker parameter.
func VersionIdMarker(value string) Option {
	return addParam("version-id-marker", value)
}

// VersionId is an option to set the versionId parameter.
func VersionId(value string) Option {
	return addParam("versionId", value)
}

// TagKey is an option to set the tag-key parameter.
func TagKey(value string) Option {
	return addParam("tag-key", value)
}

// TagValue is an option to set the tag-value parameter.
func TagValue(value string) Option {
	return addParam("tag-value", value)
}
|
||||
|
||||
// UploadIDMarker is an option to set the upload-id-marker parameter.
func UploadIDMarker(value string) Option {
	return addParam("upload-id-marker", value)
}

// MaxParts is an option to set the max-parts parameter.
func MaxParts(value int) Option {
	return addParam("max-parts", strconv.Itoa(value))
}

// PartNumberMarker is an option to set the part-number-marker parameter.
func PartNumberMarker(value int) Option {
	return addParam("part-number-marker", strconv.Itoa(value))
}

// Sequential is an option to set the sequential parameter for InitiateMultipartUpload.
func Sequential() Option {
	return addParam("sequential", "")
}

// WithHashContext is an option to set the withHashContext parameter for InitiateMultipartUpload.
func WithHashContext() Option {
	return addParam("withHashContext", "")
}

// EnableMd5 is an option to set the x-oss-enable-md5 parameter for InitiateMultipartUpload.
func EnableMd5() Option {
	return addParam("x-oss-enable-md5", "")
}

// EnableSha1 is an option to set the x-oss-enable-sha1 parameter for InitiateMultipartUpload.
func EnableSha1() Option {
	return addParam("x-oss-enable-sha1", "")
}

// EnableSha256 is an option to set the x-oss-enable-sha256 parameter for InitiateMultipartUpload.
func EnableSha256() Option {
	return addParam("x-oss-enable-sha256", "")
}
|
||||
|
||||
// ListType is an option to set the list-type parameter for ListObjectsV2.
func ListType(value int) Option {
	return addParam("list-type", strconv.Itoa(value))
}

// StartAfter is an option to set the start-after parameter for ListObjectsV2.
func StartAfter(value string) Option {
	return addParam("start-after", value)
}
|
||||
|
||||
// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
|
||||
func ContinuationToken(value string) Option {
|
||||
if value == "" {
|
||||
return addParam("continuation-token", nil)
|
||||
}
|
||||
return addParam("continuation-token", value)
|
||||
}
|
||||
|
||||
// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
|
||||
func FetchOwner(value bool) Option {
|
||||
if value {
|
||||
return addParam("fetch-owner", "true")
|
||||
}
|
||||
return addParam("fetch-owner", "false")
|
||||
}
|
||||
|
||||
// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
	return addArg(deleteObjectsQuiet, isQuiet)
}

// StorageClass is an option to set the bucket storage class.
func StorageClass(value StorageClassType) Option {
	return addArg(storageClass, value)
}

// RedundancyType is an option to set the bucket data redundancy type.
func RedundancyType(value DataRedundancyType) Option {
	return addArg(redundancyType, value)
}

// ObjectHashFunc is an option to set the object hash function.
func ObjectHashFunc(value ObjecthashFuncType) Option {
	return addArg(objectHashFunc, value)
}
|
||||
|
||||
// cpConfig is the checkpoint configuration used by DownloadFile/UploadFile.
type cpConfig struct {
	IsEnable bool   // whether checkpointing is enabled
	FilePath string // explicit checkpoint file path (used by Checkpoint)
	DirPath  string // checkpoint directory path (used by CheckpointDir)
}

// Checkpoint sets the isEnable flag and the checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
}

// CheckpointDir sets the isEnable flag and the checkpoint directory path for DownloadFile/UploadFile.
func CheckpointDir(isEnable bool, dirPath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
}
|
||||
|
||||
// Routines sets the DownloadFile/UploadFile concurrent routine count.
func Routines(n int) Option {
	return addArg(routineNum, n)
}

// InitCRC sets the initial CRC value for AppendObject.
func InitCRC(initCRC uint64) Option {
	return addArg(initCRC64, initCRC)
}

// Progress sets the transfer progress listener.
func Progress(listener ProgressListener) Option {
	return addArg(progressListener, listener)
}

// GetResponseHeader captures the HTTP response headers into respHeader.
func GetResponseHeader(respHeader *http.Header) Option {
	return addArg(responseHeader, respHeader)
}
|
||||
|
||||
// ResponseContentType is an option to set the response-content-type parameter.
func ResponseContentType(value string) Option {
	return addParam("response-content-type", value)
}

// ResponseContentLanguage is an option to set the response-content-language parameter.
func ResponseContentLanguage(value string) Option {
	return addParam("response-content-language", value)
}

// ResponseExpires is an option to set the response-expires parameter.
func ResponseExpires(value string) Option {
	return addParam("response-expires", value)
}

// ResponseCacheControl is an option to set the response-cache-control parameter.
func ResponseCacheControl(value string) Option {
	return addParam("response-cache-control", value)
}

// ResponseContentDisposition is an option to set the response-content-disposition parameter.
func ResponseContentDisposition(value string) Option {
	return addParam("response-content-disposition", value)
}

// ResponseContentEncoding is an option to set the response-content-encoding parameter.
func ResponseContentEncoding(value string) Option {
	return addParam("response-content-encoding", value)
}

// Process is an option to set the x-oss-process parameter.
func Process(value string) Option {
	return addParam("x-oss-process", value)
}
|
||||
|
||||
// TrafficLimitParam is an option to set the x-oss-traffic-limit parameter (bits per second).
func TrafficLimitParam(value int64) Option {
	return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
}

// SetHeader allows users to set a custom HTTP header. The value must be a
// string; it is type-asserted when headers are serialized.
func SetHeader(key string, value interface{}) Option {
	return setHeader(key, value)
}

// AddParam allows users to set a custom HTTP URL parameter. The value must be
// a string; it is type-asserted when parameters are serialized.
func AddParam(key string, value interface{}) Option {
	return addParam(key, value)
}
|
||||
|
||||
// setHeader returns an Option that records value under key as an HTTP header.
// A nil value makes the option a no-op.
func setHeader(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionHTTP}
		return nil
	}
}

// addParam returns an Option that records value under key as a URL parameter.
// A nil value makes the option a no-op.
func addParam(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionParam}
		return nil
	}
}

// addArg returns an Option that records value under key as an internal
// argument (neither header nor URL parameter). A nil value makes the option a no-op.
func addArg(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionArg}
		return nil
	}
}
|
||||
|
||||
// handleOptions applies all options and copies the resulting HTTP-header
// entries into headers. It returns the first error produced by any option.
func handleOptions(headers map[string]string, options []Option) error {
	params := map[string]optionValue{}
	for _, option := range options {
		if option != nil {
			if err := option(params); err != nil {
				return err
			}
		}
	}

	for k, v := range params {
		if v.Type == optionHTTP {
			// NOTE(review): assumes every optionHTTP value was stored as a
			// string; a non-string passed via SetHeader would panic here —
			// confirm callers only pass strings.
			headers[k] = v.Value.(string)
		}
	}
	return nil
}
|
||||
|
||||
func GetRawParams(options []Option) (map[string]interface{}, error) {
|
||||
// Option
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
paramsm := map[string]interface{}{}
|
||||
// Serialize
|
||||
for k, v := range params {
|
||||
if v.Type == optionParam {
|
||||
vs := params[k]
|
||||
paramsm[k] = vs.Value.(string)
|
||||
}
|
||||
}
|
||||
|
||||
return paramsm, nil
|
||||
}
|
||||
|
||||
func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[param]; ok {
|
||||
return val.Value, nil
|
||||
}
|
||||
return defaultVal, nil
|
||||
}
|
||||
|
||||
func IsOptionSet(options []Option, option string) (bool, interface{}, error) {
|
||||
params := map[string]optionValue{}
|
||||
for _, option := range options {
|
||||
if option != nil {
|
||||
if err := option(params); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if val, ok := params[option]; ok {
|
||||
return true, val.Value, nil
|
||||
}
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
// DeleteOption returns the given options with every option that sets strKey
// removed. Options are detected by applying them one at a time to a shared
// params map and checking whether strKey appeared.
func DeleteOption(options []Option, strKey string) []Option {
	var outOption []Option
	// params accumulates across iterations; strKey is deleted after each hit
	// so each option is tested against a map that never contains strKey.
	params := map[string]optionValue{}
	for _, option := range options {
		if option != nil {
			// NOTE(review): the error returned by option(params) is silently
			// ignored here (the signature has no error to propagate) — a
			// failing option is kept or dropped based on partial state.
			option(params)
			_, exist := params[strKey]
			if !exist {
				outOption = append(outOption, option)
			} else {
				delete(params, strKey)
			}
		}
	}
	return outOption
}
|
||||
|
||||
// GetRequestId returns the x-oss-request-id response header value.
func GetRequestId(header http.Header) string {
	return header.Get("x-oss-request-id")
}

// GetVersionId returns the x-oss-version-id response header value.
func GetVersionId(header http.Header) string {
	return header.Get("x-oss-version-id")
}

// GetCopySrcVersionId returns the x-oss-copy-source-version-id response header value.
func GetCopySrcVersionId(header http.Header) string {
	return header.Get("x-oss-copy-source-version-id")
}
|
||||
|
||||
func GetDeleteMark(header http.Header) bool {
|
||||
value := header.Get("x-oss-delete-marker")
|
||||
if strings.ToUpper(value) == "TRUE" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetQosDelayTime returns the x-oss-qos-delay-time response header value.
func GetQosDelayTime(header http.Header) string {
	return header.Get("x-oss-qos-delay-time")
}
|
||||
|
||||
// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite
|
||||
func AllowSameActionOverLap(enabled bool) Option {
|
||||
if enabled {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
|
||||
} else {
|
||||
return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
|
||||
}
|
||||
}
|
@ -0,0 +1,116 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ProgressEventType defines the transfer progress event type.
type ProgressEventType int

const (
	// TransferStartedEvent transfer started, TotalBytes is set.
	TransferStartedEvent ProgressEventType = 1 + iota
	// TransferDataEvent transfer data, ConsumedBytes and TotalBytes are set.
	TransferDataEvent
	// TransferCompletedEvent transfer completed.
	TransferCompletedEvent
	// TransferFailedEvent transfer encountered an error.
	TransferFailedEvent
)
|
||||
|
||||
// ProgressEvent defines a transfer progress event.
type ProgressEvent struct {
	ConsumedBytes int64 // bytes transferred so far
	TotalBytes    int64 // total bytes to transfer (0 when unknown)
	RwBytes       int64 // bytes read/written in this event
	EventType     ProgressEventType
}

// ProgressListener listens for transfer progress changes.
type ProgressListener interface {
	ProgressChanged(event *ProgressEvent)
}
|
||||
|
||||
// -------------------- Private --------------------
|
||||
|
||||
func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
ConsumedBytes: consumed,
|
||||
TotalBytes: total,
|
||||
RwBytes: rwBytes,
|
||||
EventType: eventType}
|
||||
}
|
||||
|
||||
// publishProgress delivers event to listener; both may safely be nil, in
// which case nothing happens.
func publishProgress(listener ProgressListener, event *ProgressEvent) {
	if listener != nil && event != nil {
		listener.ProgressChanged(event)
	}
}
|
||||
|
||||
// readerTracker records how many bytes a teeReader has completed.
type readerTracker struct {
	completedBytes int64
}

// teeReader wraps a reader, mirroring reads to an optional writer (e.g. for
// CRC calculation), publishing progress events and updating a tracker.
type teeReader struct {
	reader        io.Reader
	writer        io.Writer        // optional; receives a copy of every read
	listener      ProgressListener // optional; receives progress events
	consumedBytes int64
	totalBytes    int64
	tracker       *readerTracker // optional; mirrors consumedBytes
}
|
||||
|
||||
// TeeReader returns a Reader that writes to w what it reads from r.
|
||||
// All reads from r performed through it are matched with
|
||||
// corresponding writes to w. There is no internal buffering -
|
||||
// the write must complete before the read completes.
|
||||
// Any error encountered while writing is reported as a read error.
|
||||
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
|
||||
return &teeReader{
|
||||
reader: reader,
|
||||
writer: writer,
|
||||
listener: listener,
|
||||
consumedBytes: 0,
|
||||
totalBytes: totalBytes,
|
||||
tracker: tracker,
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads from the underlying reader, mirrors the bytes to the optional
// writer, publishes progress events, and updates the optional tracker. The
// shape mirrors the standard library's io.TeeReader.
func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)

	// Read encountered a real error (EOF is not reported as failure).
	if err != nil && err != io.EOF {
		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
		publishProgress(t.listener, event)
	}

	if n > 0 {
		t.consumedBytes += int64(n)
		// CRC: mirror the bytes to the writer; on write error return the
		// writer's count, as io.TeeReader does.
		if t.writer != nil {
			if n, err := t.writer.Write(p[:n]); err != nil {
				return n, err
			}
		}
		// Progress
		if t.listener != nil {
			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
			publishProgress(t.listener, event)
		}
		// Track
		if t.tracker != nil {
			t.tracker.completedBytes = t.consumedBytes
		}
	}

	return
}
|
||||
|
||||
func (t *teeReader) Close() error {
|
||||
if rc, ok := t.reader.(io.ReadCloser); ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// disableHTTPRedirect is a no-op on Go versions before 1.7:
// http.ErrUseLastResponse is only defined from go1.7 onward, so redirects
// cannot be disabled this way here.
func disableHTTPRedirect(client *http.Client) {

}
|
@ -0,0 +1,12 @@
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import "net/http"
|
||||
|
||||
// http.ErrUseLastResponse only is defined go1.7 onward
|
||||
func disableHTTPRedirect(client *http.Client) {
|
||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
@ -0,0 +1,197 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CreateSelectCsvObjectMeta creates the csv object meta.
//
// key        the object key.
// csvMeta    the csv file meta request.
// options    the options for creating the csv meta of the object.
//
// MetaEndFrameCSV  the csv file meta info.
// error            it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
	var endFrame MetaEndFrameCSV
	params := map[string]interface{}{}
	params["x-oss-process"] = "csv/meta"

	csvMeta.encodeBase64()
	bs, err := xml.Marshal(csvMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	// Drain through resp (not resp.Body): SelectObjectResponse.Read parses
	// the frame stream, which populates resp.Frame.MetaEndFrameCSV.
	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameCSV, err
}
|
||||
|
||||
// CreateSelectJsonObjectMeta creates the json object meta.
//
// key        the object key.
// jsonMeta   the json file meta request.
// options    the options for creating the json meta of the object.
//
// MetaEndFrameJSON  the json file meta info.
// error             it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
	var endFrame MetaEndFrameJSON
	params := map[string]interface{}{}
	params["x-oss-process"] = "json/meta"

	// NOTE(review): unlike CreateSelectCsvObjectMeta, no encodeBase64() is
	// called on jsonMeta here — confirm this asymmetry is intentional.
	bs, err := xml.Marshal(jsonMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	// Drain through resp so the frame parser populates MetaEndFrameJSON.
	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameJSON, err
}
|
||||
|
||||
// SelectObject is the select object API, supporting csv and json files.
//
// key        the object key.
// selectReq  the request data for select object.
// options    the options for selecting from the object.
//
// io.ReadCloser  reader instance for reading data from the response. It must
//                be closed after use and is only valid when error is nil.
// error          it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
	params := map[string]interface{}{}
	// Choose the process action from the input serialization: an empty json
	// input means the request targets a csv file.
	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
		params["x-oss-process"] = "csv/select" // default select csv file
	} else {
		params["x-oss-process"] = "json/select"
	}
	selectReq.encodeBase64()
	bs, err := xml.Marshal(selectReq)
	if err != nil {
		return nil, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return nil, err
	}
	// Propagate request-level flags into the frame parser state.
	if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
		resp.Frame.EnablePayloadCrc = true
	}
	resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"

	return resp, err
}
|
||||
|
||||
// DoPostSelectObject is the SelectObject/CreateMeta API, supporting csv and json files.
//
// key      the object key.
// params   the oss resource: csv/meta, json/meta, csv/select, json/select.
// buf      the request body.
// options  the options for selecting from the object.
//
// SelectObjectResponse  the response of select object.
// error                 it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
	resp, err := bucket.do("POST", key, params, options, buf, nil)
	if err != nil {
		return nil, err
	}

	result := &SelectObjectResponse{
		Body:       resp.Body,
		StatusCode: resp.StatusCode,
		Frame:      SelectObjectResult{},
	}
	result.Headers = resp.Headers
	// result.Frame = SelectObjectResult{}
	result.ReadTimeOut = bucket.GetConfig().Timeout

	// Progress
	listener := GetProgressListener(options)

	// CRC32
	// NOTE(review): crcCalc is stored on the result but the TeeReader below is
	// given a nil writer, so reads are not fed into it here — confirm the CRC
	// is driven elsewhere via WriterForCheckCrc32.
	crcCalc := crc32.NewIEEE()
	result.WriterForCheckCrc32 = crcCalc
	result.Body = TeeReader(resp.Body, nil, 0, listener, nil)

	err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})

	return result, err
}
|
||||
|
||||
// SelectObjectIntoFile is the selectObject to file api
|
||||
//
|
||||
// key the object key.
|
||||
// fileName saving file's name to localstation.
|
||||
// selectReq the request data for select object
|
||||
// options the options for select file of the object.
|
||||
//
|
||||
// error it's nil if no error, otherwise it's an error object.
|
||||
//
|
||||
func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
|
||||
tempFilePath := fileName + TempFileSuffix
|
||||
|
||||
params := map[string]interface{}{}
|
||||
if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
|
||||
params["x-oss-process"] = "csv/select" // default select csv file
|
||||
} else {
|
||||
params["x-oss-process"] = "json/select"
|
||||
}
|
||||
selectReq.encodeBase64()
|
||||
bs, err := xml.Marshal(selectReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buffer := new(bytes.Buffer)
|
||||
buffer.Write(bs)
|
||||
resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
|
||||
// If the local file does not exist, create a new one. If it exists, overwrite it.
|
||||
fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the data to the local file path.
|
||||
_, err = io.Copy(fd, resp)
|
||||
fd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Rename(tempFilePath, fileName)
|
||||
}
|
@ -0,0 +1,364 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The adapter type for SelectObject's response.
// The response consists of frames. Each frame has the following format:

// Type | Payload Length | Header Checksum | Payload | Payload Checksum

// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes--------->
// And we have three kinds of frames.
// Data Frame:
// Type:8388609
// Payload: Offset | Data
//          <-8 bytes>

// Continuous Frame
// Type:8388612
// Payload: Offset (8-bytes)

// End Frame
// Type:8388613
// Payload: Offset | total scanned bytes | http status code | error message
//     <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variable--->

// SelectObjectResponse defines the HTTP response from OSS SelectObject.
type SelectObjectResponse struct {
	StatusCode          int
	Headers             http.Header
	Body                io.ReadCloser
	Frame               SelectObjectResult // frame-parser state and results
	ReadTimeOut         uint               // per-read timeout in seconds
	ClientCRC32         uint32
	ServerCRC32         uint32
	WriterForCheckCrc32 hash.Hash32
	Finish              bool // set once the end frame has been consumed
}
|
||||
|
||||
func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
|
||||
n, err = sr.readFrames(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the underlying HTTP response body.
func (sr *SelectObjectResponse) Close() error {
	return sr.Body.Close()
}
|
||||
|
||||
// PostSelectResult wraps the response of a SelectObject request.
type PostSelectResult struct {
	Response *SelectObjectResponse
}
|
||||
|
||||
// readFrames is read Frame
|
||||
func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
|
||||
var nn int
|
||||
var err error
|
||||
var checkValid bool
|
||||
if sr.Frame.OutputRawData == true {
|
||||
nn, err = sr.Body.Read(p)
|
||||
return nn, err
|
||||
}
|
||||
|
||||
if sr.Finish {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
for {
|
||||
// if this Frame is Readed, then not reading Header
|
||||
if sr.Frame.OpenLine != true {
|
||||
err = sr.analysisHeader()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
}
|
||||
|
||||
if sr.Frame.FrameType == DataFrameType {
|
||||
n, err := sr.analysisData(p[nn:])
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
nn += n
|
||||
|
||||
// if this Frame is readed all data, then empty the Frame to read it with next frame
|
||||
if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if err != nil || !checkValid {
|
||||
return nn, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
sr.emptyFrame()
|
||||
}
|
||||
|
||||
if nn == len(p) {
|
||||
return nn, nil
|
||||
}
|
||||
} else if sr.Frame.FrameType == ContinuousFrameType {
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if err != nil || !checkValid {
|
||||
return nn, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
} else if sr.Frame.FrameType == EndFrameType {
|
||||
err = sr.analysisEndFrame()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
} else if sr.Frame.FrameType == MetaEndFrameCSVType {
|
||||
err = sr.analysisMetaEndFrameCSV()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
} else if sr.Frame.FrameType == MetaEndFrameJSONType {
|
||||
err = sr.analysisMetaEndFrameJSON()
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
checkValid, err = sr.checkPayloadSum()
|
||||
if checkValid {
|
||||
sr.Finish = true
|
||||
}
|
||||
return nn, err
|
||||
}
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
// chanReadIO carries the result of the background read in readLen: the number
// of bytes read so far and the first error encountered.
type chanReadIO struct {
	readLen int
	err     error
}
|
||||
|
||||
func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
|
||||
r := sr.Body
|
||||
ch := make(chan chanReadIO, 1)
|
||||
defer close(ch)
|
||||
go func(p []byte) {
|
||||
var needReadLength int
|
||||
readChan := chanReadIO{}
|
||||
needReadLength = len(p)
|
||||
for {
|
||||
n, err := r.Read(p[readChan.readLen:needReadLength])
|
||||
readChan.readLen += n
|
||||
if err != nil {
|
||||
readChan.err = err
|
||||
ch <- readChan
|
||||
return
|
||||
}
|
||||
|
||||
if readChan.readLen == needReadLength {
|
||||
break
|
||||
}
|
||||
}
|
||||
ch <- readChan
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second * timeOut):
|
||||
return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
|
||||
case result := <-ch:
|
||||
return result.readLen, result.err
|
||||
}
|
||||
}
|
||||
|
||||
// analysisHeader reads and decodes the 20-byte frame header of the
// select-object response body into sr.Frame. Header layout (big-endian):
// byte 0 is the version, bytes 1-3 the frame type, bytes 4-7 the payload
// length, bytes 8-11 the header checksum, bytes 12-19 the offset.
func (sr *SelectObjectResponse) analysisHeader() error {
	headFrameByte := make([]byte, 20)
	_, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
	}

	// The first of the 4 bytes is the version; zero it so the remaining 3
	// bytes decode as the frame type.
	frameTypeByte := headFrameByte[0:4]
	sr.Frame.Version = frameTypeByte[0]
	frameTypeByte[0] = 0
	bytesToInt(frameTypeByte, &sr.Frame.FrameType)

	// Reject anything that is not one of the five known frame types.
	if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
		sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
		return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
	}

	payloadLengthByte := headFrameByte[4:8]
	bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
	headCheckSumByte := headFrameByte[8:12]
	bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
	byteOffset := headFrameByte[12:20]
	bytesToInt(byteOffset, &sr.Frame.Offset)
	sr.Frame.OpenLine = true

	// The offset bytes are part of the payload for CRC purposes.
	err = sr.writerCheckCrc32(byteOffset)
	return err
}
|
||||
|
||||
// analysisData reads DataFrameType payload bytes into p, never reading past
// the end of the current frame's payload. It returns the number of bytes
// actually read and feeds them into the running payload CRC.
func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
	var needReadLength int32
	lenP := int32(len(p))
	// Payload bytes still unread in this frame; the leading 8 offset bytes
	// were already consumed by analysisHeader.
	restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
	if lenP <= restByteLength {
		needReadLength = lenP
	} else {
		needReadLength = restByteLength
	}
	n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
	if err != nil {
		return n, fmt.Errorf("read frame data error,%s", err.Error())
	}
	sr.Frame.ConsumedBytesLength += int32(n)
	err = sr.writerCheckCrc32(p[:n])
	return n, err
}
|
||||
|
||||
// analysisEndFrame reads an EndFrameType payload and stores the decoded
// total-scanned count, HTTP status code and error message on sr.Frame.
// Payload layout: total scanned (8 bytes), HTTP status (4 bytes), then the
// error message occupying the remainder of the payload.
func (sr *SelectObjectResponse) analysisEndFrame() error {
	var eF EndFrame
	// PayloadLength includes the 8 offset bytes already read with the header.
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read end frame error:%s", err.Error())
	}
	bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
	// PayloadLength - 8 (offset) - 8 (scanned) - 4 (status) = PayloadLength - 20.
	errMsgLength := sr.Frame.PayloadLength - 20
	eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
	sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
	sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
	sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// analysisMetaEndFrameCSV reads a MetaEndFrameCSVType payload and stores the
// decoded CSV meta fields on sr.Frame. Payload layout: total scanned (8),
// status (4), splits count (4), rows count (8), columns count (4), then the
// error message occupying the remainder.
func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
	var mCF MetaEndFrameCSV
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end csv frame error:%s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mCF.Status)
	bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
	bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
	// 8 (offset) + 28 fixed fields = 36 bytes precede the error message.
	errMsgLength := sr.Frame.PayloadLength - 36
	mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
	sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
	sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
	sr.Frame.MetaEndFrameCSV.Status = mCF.Status
	sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
	sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
	sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// analysisMetaEndFrameJSON reads a MetaEndFrameJSONType payload and stores
// the decoded JSON meta fields on sr.Frame. Payload layout: total scanned (8),
// status (4), splits count (4), rows count (8), then the error message
// occupying the remainder (no columns count, unlike the CSV variant).
func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
	var mJF MetaEndFrameJSON
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end json frame error:%s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mJF.Status)
	bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
	// 8 (offset) + 24 fixed fields = 32 bytes precede the error message.
	errMsgLength := sr.Frame.PayloadLength - 32
	mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
	sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
	sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
	sr.Frame.MetaEndFrameJSON.Status = mJF.Status
	sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
	sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount

	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}
|
||||
|
||||
// checkPayloadSum reads the 4-byte payload checksum that trails each frame
// and, when payload CRC is enabled, compares the server value against the
// CRC-32 accumulated by writerCheckCrc32. It returns (true, nil) on match or
// when checking is disabled, and (false, err) on mismatch or read failure.
func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
	payLoadChecksumByte := make([]byte, 4)
	n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
	if n == 4 {
		bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
		sr.ServerCRC32 = sr.Frame.PayloadChecksum
		sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
		// A server CRC of 0 means the server did not supply a checksum.
		// NOTE(review): the message text says "Unexpected frame type" but
		// this branch actually reports a CRC mismatch — confirm before
		// relying on the wording.
		if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
			return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
				sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
		}
		return true, err
	}
	// readLen only returns short on error, so err is non-nil here.
	return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
}
|
||||
|
||||
func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
|
||||
err = nil
|
||||
if sr.Frame.EnablePayloadCrc == true {
|
||||
_, err = sr.WriterForCheckCrc32.Write(p)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// emptyFrame resets the per-frame state of the SelectObjectResponse so the
// next frame can be parsed from scratch: a fresh CRC-32 accumulator and
// zeroed frame fields. Note that Frame.EnablePayloadCrc is deliberately NOT
// reset — it is a per-request setting, which is why the fields are cleared
// one by one rather than by assigning a zero struct literal.
func (sr *SelectObjectResponse) emptyFrame() {
	crcCalc := crc32.NewIEEE()
	sr.WriterForCheckCrc32 = crcCalc
	sr.Finish = false

	// Generic frame header state.
	sr.Frame.ConsumedBytesLength = 0
	sr.Frame.OpenLine = false
	sr.Frame.Version = byte(0)
	sr.Frame.FrameType = 0
	sr.Frame.PayloadLength = 0
	sr.Frame.HeaderCheckSum = 0
	sr.Frame.Offset = 0
	sr.Frame.Data = ""

	// End-frame payload fields.
	sr.Frame.EndFrame.TotalScanned = 0
	sr.Frame.EndFrame.HTTPStatusCode = 0
	sr.Frame.EndFrame.ErrorMsg = ""

	// CSV meta-end-frame payload fields.
	sr.Frame.MetaEndFrameCSV.TotalScanned = 0
	sr.Frame.MetaEndFrameCSV.Status = 0
	sr.Frame.MetaEndFrameCSV.SplitsCount = 0
	sr.Frame.MetaEndFrameCSV.RowsCount = 0
	sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
	sr.Frame.MetaEndFrameCSV.ErrorMsg = ""

	// JSON meta-end-frame payload fields.
	sr.Frame.MetaEndFrameJSON.TotalScanned = 0
	sr.Frame.MetaEndFrameJSON.Status = 0
	sr.Frame.MetaEndFrameJSON.SplitsCount = 0
	sr.Frame.MetaEndFrameJSON.RowsCount = 0
	sr.Frame.MetaEndFrameJSON.ErrorMsg = ""

	sr.Frame.PayloadChecksum = 0
}
|
||||
|
||||
// bytesToInt decodes the big-endian bytes of b into the integer pointed to
// by ret; ret must be a pointer to a fixed-size integer type (e.g. *int32)
// whose size matches len(b). Decoding errors from binary.Read are ignored,
// matching the fixed-size frame fields this helper is used for.
func bytesToInt(b []byte, ret interface{}) {
	binary.Read(bytes.NewReader(b), binary.BigEndian, ret)
}
|
@ -0,0 +1,41 @@
|
||||
// +build !go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// newTransport builds the HTTP transport used by the SDK (pre-go1.7 build):
// a custom dialer applies the configured connect timeout, keep-alive, and
// optional local bind address, and wraps each connection with read/write
// timeouts via newTimeoutConn. Connection-pool knobs introduced in go1.7
// (MaxIdleConns etc.) are unavailable here.
func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout:   httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			// Enforce per-operation read/write deadlines on the raw conn.
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	// Optionally skip TLS certificate verification (explicit opt-in only).
	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
|
@ -0,0 +1,44 @@
|
||||
// +build go1.7
|
||||
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// newTransport builds the HTTP transport used by the SDK (go1.7+ build).
// In addition to the pre-1.7 variant's custom dialer with connect timeout,
// keep-alive, optional local bind address and per-connection read/write
// deadlines, this version configures the go1.7+ connection-pool settings
// (MaxIdleConns, MaxConnsPerHost, IdleConnTimeout).
func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout:   httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			// Enforce per-operation read/write deadlines on the raw conn.
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConns:          httpMaxConns.MaxIdleConns,
		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
		MaxConnsPerHost:       httpMaxConns.MaxConnsPerHost,
		IdleConnTimeout:       httpTimeOut.IdleConnTimeout,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	// Optionally skip TLS certificate verification (explicit opt-in only).
	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,552 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UploadFile uploads a local file as a multipart upload, optionally resuming
// from a checkpoint file when a checkpoint configuration is supplied.
//
// objectKey is the object name, filePath the local file to upload, partSize
// the part size in bytes (must lie in (MinPartSize, MaxPartSize]), and
// options the upload options.
//
// It returns nil on success, otherwise an error.
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (100KB, 5GB]")
	}

	cpConf := getCpConfig(options)
	routines := getRoutines(options)

	// Checkpoint-enabled path: resume via the derived checkpoint file.
	if cpConf != nil && cpConf.IsEnable {
		cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
		if cpFilePath != "" {
			return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
		}
	}

	// Plain concurrent upload without checkpoint.
	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
|
||||
|
||||
func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
|
||||
if cpConf.FilePath == "" && cpConf.DirPath != "" {
|
||||
dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
|
||||
absPath, _ := filepath.Abs(srcFile)
|
||||
cpFileName := getCpFileName(absPath, dest, "")
|
||||
cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
|
||||
}
|
||||
return cpConf.FilePath
|
||||
}
|
||||
|
||||
// ----- concurrent upload without checkpoint -----
|
||||
|
||||
// getCpConfig extracts the checkpoint configuration from options, returning
// nil when no checkpointConfig option was supplied.
func getCpConfig(options []Option) *cpConfig {
	cpcOpt, err := FindOption(options, checkpointConfig, nil)
	if err != nil || cpcOpt == nil {
		return nil
	}

	return cpcOpt.(*cpConfig)
}
|
||||
|
||||
// getCpFileName builds the checkpoint file name from the hex MD5 digests of
// the source path, the destination URI and, when non-empty, the version id:
// "<md5(src)>-<md5(dest)>.cp" or "<md5(src)>-<md5(dest)>-<md5(version)>.cp".
func getCpFileName(src, dest, versionId string) string {
	hexMD5 := func(s string) string {
		digest := md5.Sum([]byte(s))
		return hex.EncodeToString(digest[:])
	}

	srcCheckSum := hexMD5(src)
	destCheckSum := hexMD5(dest)
	if versionId == "" {
		return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
	}
	return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, hexMD5(versionId))
}
|
||||
|
||||
// getRoutines gets the routine count. by default it's 1.
|
||||
func getRoutines(options []Option) int {
|
||||
rtnOpt, err := FindOption(options, routineNum, nil)
|
||||
if err != nil || rtnOpt == nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
rs := rtnOpt.(int)
|
||||
if rs < 1 {
|
||||
rs = 1
|
||||
} else if rs > 100 {
|
||||
rs = 100
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// getPayer returns the request-payer value from options, or the empty string
// when no HTTPHeaderOssRequester option was supplied.
func getPayer(options []Option) string {
	payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
	if err != nil || payerOpt == nil {
		return ""
	}
	return payerOpt.(string)
}
|
||||
|
||||
// GetProgressListener returns the progress callback configured in options,
// or nil when the progressListener option was not set.
func GetProgressListener(options []Option) ProgressListener {
	isSet, listener, _ := IsOptionSet(options, progressListener)
	if !isSet {
		return nil
	}
	return listener.(ProgressListener)
}
|
||||
|
||||
// uploadPartHook is a test seam invoked before each part upload; tests swap
// uploadPartHooker to inject failures or observe scheduling.
type uploadPartHook func(id int, chunk FileChunk) error

// uploadPartHooker is the hook called by worker before uploading each part.
var uploadPartHooker uploadPartHook = defaultUploadPart

// defaultUploadPart is the production hook: a no-op that always succeeds.
func defaultUploadPart(id int, chunk FileChunk) error {
	return nil
}
|
||||
|
||||
// workerArg bundles the shared, read-only arguments passed to every upload
// worker goroutine.
type workerArg struct {
	bucket   *Bucket                       // Target bucket
	filePath string                        // Local file being uploaded
	imur     InitiateMultipartUploadResult // Multipart upload session
	options  []Option                      // Per-part transfer options
	hook     uploadPartHook                // Test hook run before each part
}
|
||||
|
||||
// defaultUploadProgressListener is a no-op ProgressListener attached to each
// part upload so progress events are always consumed, even when the caller
// supplied no listener of their own.
type defaultUploadProgressListener struct {
}

// ProgressChanged intentionally ignores all progress events.
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
}
|
||||
|
||||
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
|
||||
for chunk := range jobs {
|
||||
if err := arg.hook(id, chunk); err != nil {
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
var respHeader http.Header
|
||||
p := Progress(&defaultUploadProgressListener{})
|
||||
opts := make([]Option, len(arg.options)+2)
|
||||
opts = append(opts, arg.options...)
|
||||
|
||||
// use defaultUploadProgressListener
|
||||
opts = append(opts, p, GetResponseHeader(&respHeader))
|
||||
|
||||
startT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
|
||||
endT := time.Now().UnixNano() / 1000 / 1000 / 1000
|
||||
if err != nil {
|
||||
arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
|
||||
failed <- err
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-die:
|
||||
return
|
||||
default:
|
||||
}
|
||||
results <- part
|
||||
}
|
||||
}
|
||||
|
||||
// scheduler function
|
||||
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
|
||||
for _, chunk := range chunks {
|
||||
jobs <- chunk
|
||||
}
|
||||
close(jobs)
|
||||
}
|
||||
|
||||
func getTotalBytes(chunks []FileChunk) int64 {
|
||||
var tb int64
|
||||
for _, chunk := range chunks {
|
||||
tb += chunk.Size
|
||||
}
|
||||
return tb
|
||||
}
|
||||
|
||||
// uploadFile performs a concurrent multipart upload without checkpointing:
// the file is split into chunks, `routines` workers upload parts in
// parallel, progress events are published, and the upload is completed (or
// aborted on any failure).
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
	listener := GetProgressListener(options)

	chunks, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	// Per-phase option subsets for part transfer, complete and abort calls.
	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)
	abortOptions := ChoiceAbortPartOption(options)

	// Initialize the multipart upload
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}

	// jobs/results are buffered to full capacity so neither scheduler nor
	// workers block on channel capacity; die signals workers to stop.
	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getTotalBytes(chunks)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the worker coroutines.
	arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Schedule the jobs.
	go scheduler(jobs, chunks)

	// Wait for all parts to finish (or the first failure).
	completed := 0
	parts := make([]UploadPart, len(chunks))
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			// Part numbers are 1-based; slot each part into its index.
			parts[part.PartNumber-1] = part
			completedBytes += chunks[part.PartNumber-1].Size

			// why RwBytes in ProgressEvent is 0 ?
			// because read or write event has been notified in teeReader.Read()
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
			publishProgress(listener, event)
		case err := <-failed:
			// First failure: stop all workers, report, and abort the upload.
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			bucket.AbortMultipartUpload(imur, abortOptions...)
			return err
		}

		if completed >= len(chunks) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	// Complete the multipart upload; abort on failure so no orphaned
	// upload session is left behind.
	_, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
	if err != nil {
		bucket.AbortMultipartUpload(imur, abortOptions...)
		return err
	}
	return nil
}
|
||||
|
||||
// ----- concurrent upload with checkpoint -----

// uploadCpMagic identifies a valid upload checkpoint file.
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"

// uploadCheckpoint is the JSON-serialized resume state for a multipart
// upload; MD5 covers the rest of the struct to detect corruption.
type uploadCheckpoint struct {
	Magic     string   // Magic
	MD5       string   // Checkpoint file content's MD5
	FilePath  string   // Local file path
	FileStat  cpStat   // File state
	ObjectKey string   // Key
	UploadID  string   // Upload ID
	Parts     []cpPart // All parts of the local file
}
|
||||
|
||||
// cpStat captures the local file's identity at checkpoint time; any change
// in these fields invalidates the checkpoint.
type cpStat struct {
	Size         int64     // File size
	LastModified time.Time // File's last modified time
	MD5          string    // Local file's MD5
}

// cpPart tracks one chunk of the file and its upload status.
type cpPart struct {
	Chunk       FileChunk  // File chunk
	Part        UploadPart // Uploaded part
	IsCompleted bool       // Upload complete flag
}
|
||||
|
||||
// isValid reports whether the checkpoint can be resumed: the checkpoint
// content must be self-consistent (magic + MD5 over the struct) and the
// local file must be unchanged (size, mtime, MD5) since the checkpoint was
// written. A mismatch returns (false, nil); only I/O failures return an error.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
	// Compare the CP's magic number and MD5. The MD5 is computed over the
	// JSON of the struct with its MD5 field blanked, mirroring dump().
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	// Make sure the local file has not been updated.
	fd, err := os.Open(filePath)
	if err != nil {
		return false, err
	}
	defer fd.Close()

	st, err := fd.Stat()
	if err != nil {
		return false, err
	}

	// NOTE: calcFileMD5 is currently a stub returning "", so this check
	// effectively compares empty strings on both sides.
	md, err := calcFileMD5(filePath)
	if err != nil {
		return false, err
	}

	// Compare the file size, last modified time and MD5.
	if cp.FileStat.Size != st.Size() ||
		!cp.FileStat.LastModified.Equal(st.ModTime()) ||
		cp.FileStat.MD5 != md {
		return false, nil
	}

	return true, nil
}
|
||||
|
||||
// load loads from the file
|
||||
func (cp *uploadCheckpoint) load(filePath string) error {
|
||||
contents, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(contents, cp)
|
||||
return err
|
||||
}
|
||||
|
||||
// dump persists the checkpoint to filePath as JSON. The MD5 field is set to
// the digest of the JSON with MD5 blanked, so isValid can later verify the
// file was not corrupted.
func (cp *uploadCheckpoint) dump(filePath string) error {
	bcp := *cp

	// Calculate MD5 over the struct with MD5 blanked (mirrors isValid).
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64

	// Serialize again, this time including the computed MD5.
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}

	// Write the checkpoint file.
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
|
||||
|
||||
// updatePart updates the part status
|
||||
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
|
||||
cp.Parts[part.PartNumber-1].Part = part
|
||||
cp.Parts[part.PartNumber-1].IsCompleted = true
|
||||
}
|
||||
|
||||
// todoParts returns unfinished parts
|
||||
func (cp *uploadCheckpoint) todoParts() []FileChunk {
|
||||
fcs := []FileChunk{}
|
||||
for _, part := range cp.Parts {
|
||||
if !part.IsCompleted {
|
||||
fcs = append(fcs, part.Chunk)
|
||||
}
|
||||
}
|
||||
return fcs
|
||||
}
|
||||
|
||||
// allParts returns all parts
|
||||
func (cp *uploadCheckpoint) allParts() []UploadPart {
|
||||
ps := []UploadPart{}
|
||||
for _, part := range cp.Parts {
|
||||
ps = append(ps, part.Part)
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// getCompletedBytes returns completed bytes count
|
||||
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
|
||||
var completedBytes int64
|
||||
for _, part := range cp.Parts {
|
||||
if part.IsCompleted {
|
||||
completedBytes += part.Chunk.Size
|
||||
}
|
||||
}
|
||||
return completedBytes
|
||||
}
|
||||
|
||||
// calcFileMD5 calculates the MD5 for the specified local file.
// NOTE(review): this is currently a stub that always returns ("", nil), so
// checkpoint validation effectively skips the content-MD5 comparison (both
// prepare and isValid store/compare the empty string). Implementing it would
// invalidate existing checkpoint files — confirm before changing.
func calcFileMD5(filePath string) (string, error) {
	return "", nil
}
|
||||
|
||||
// prepare initializes a fresh checkpoint for a new multipart upload: it
// records the local file's identity, splits the file into chunks, and starts
// a new multipart upload session, storing its UploadID in cp.
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
	// Checkpoint identity.
	cp.Magic = uploadCpMagic
	cp.FilePath = filePath
	cp.ObjectKey = objectKey

	// Snapshot the local file state (size, mtime, MD5) for later validation.
	fd, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer fd.Close()

	st, err := fd.Stat()
	if err != nil {
		return err
	}
	cp.FileStat.Size = st.Size()
	cp.FileStat.LastModified = st.ModTime()
	// NOTE: calcFileMD5 is a stub returning "", see its definition.
	md, err := calcFileMD5(filePath)
	if err != nil {
		return err
	}
	cp.FileStat.MD5 = md

	// Split the file into chunks; all start out not-completed.
	parts, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	cp.Parts = make([]cpPart, len(parts))
	for i, part := range parts {
		cp.Parts[i].Chunk = part
		cp.Parts[i].IsCompleted = false
	}

	// Start the multipart upload session and remember its ID for resuming.
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}
	cp.UploadID = imur.UploadID

	return nil
}
|
||||
|
||||
// complete finalizes the multipart upload described by cp and, on success,
// deletes the local checkpoint file. A failed os.Remove is deliberately
// ignored — the upload itself already succeeded.
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
		Key: cp.ObjectKey, UploadID: cp.UploadID}
	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
	if err != nil {
		return err
	}
	os.Remove(cpFilePath)
	return err
}
|
||||
|
||||
// uploadFileWithCp performs a concurrent multipart upload that can resume
// from a checkpoint file: it loads/validates the checkpoint (re-preparing a
// fresh one if invalid), uploads only the remaining chunks with `routines`
// workers, dumps the checkpoint after each part, and completes the upload.
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	listener := GetProgressListener(options)

	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)

	// Load checkpoint data; an unreadable file is discarded and rebuilt.
	ucp := uploadCheckpoint{}
	err := ucp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// On load error or stale/corrupt checkpoint data, start from scratch.
	valid, err := ucp.isValid(filePath)
	if err != nil || !valid {
		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Only the not-yet-completed chunks remain to be uploaded.
	chunks := ucp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   bucket.BucketName,
		Key:      objectKey,
		UploadID: ucp.UploadID}

	// jobs/results are buffered to full capacity; die stops workers early.
	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := ucp.getCompletedBytes()

	// why RwBytes in ProgressEvent is 0 ?
	// because read or write event has been notified in teeReader.Read()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Start the workers.
	arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Schedule jobs.
	go scheduler(jobs, chunks)

	// Wait for the remaining parts to finish (or the first failure).
	completed := 0
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			// Record progress and persist the checkpoint after every part
			// so a crash loses at most the in-flight parts.
			ucp.updatePart(part)
			ucp.dump(cpFilePath)
			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
			publishProgress(listener, event)
		case err := <-failed:
			// The upload is NOT aborted here: the checkpoint keeps the
			// session resumable on the next attempt.
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(chunks) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Complete the multipart upload and remove the checkpoint file.
	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
	return err
}
|
@ -0,0 +1,534 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cached OS identification strings used to build the SDK user agent.
// They default to Go's build-time values and are refined at startup by
// shelling out to uname where available.
var sys_name string
var sys_release string
var sys_machine string

// init seeds the system info from runtime constants, then overrides each
// field with the live `uname` output when the command is available (on
// platforms without uname, e.g. Windows, the defaults are kept).
func init() {
	sys_name = runtime.GOOS
	sys_release = "-"
	sys_machine = runtime.GOARCH

	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
		sys_name = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
		sys_release = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
		sys_machine = string(bytes.TrimSpace(out))
	}
}
|
||||
|
||||
// userAgent builds the User-Agent string sent with every request: SDK
// version, OS name/release/machine, and the Go runtime version.
func userAgent() string {
	sys := getSysInfo()
	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
		sys.release, sys.machine, runtime.Version())
}
|
||||
|
||||
// sysInfo describes the host for user-agent reporting.
type sysInfo struct {
	name    string // OS name such as windows/Linux
	release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
	machine string // CPU type amd64/x86_64
}
|
||||
|
||||
// getSysInfo returns the host information cached by init() (OS name,
// release and CPU architecture).
func getSysInfo() sysInfo {
	return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
}
|
||||
|
||||
// GetRangeConfig extracts and parses the download range from the options.
// It returns (nil, nil) when no Range option is present.
func GetRangeConfig(options []Option) (*UnpackedRange, error) {
	rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
	if err != nil || rangeOpt == nil {
		return nil, err
	}
	return ParseRange(rangeOpt.(string))
}
|
||||
|
||||
// UnpackedRange is a parsed HTTP byte range; the Has* flags record which
// endpoints were explicitly given ("M-", "-N" or "M-N").
type UnpackedRange struct {
	HasStart bool  // Flag indicates if the start point is specified
	HasEnd   bool  // Flag indicates if the end point is specified
	Start    int64 // Start point
	End      int64 // End point
}
|
||||
|
||||
// InvalidRangeError builds the error reported for an unparseable range
// specification r.
func InvalidRangeError(r string) error {
	return errors.New("InvalidRange " + r)
}
|
||||
|
||||
func GetRangeString(unpackRange UnpackedRange) string {
|
||||
var strRange string
|
||||
if unpackRange.HasStart && unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
|
||||
} else if unpackRange.HasStart {
|
||||
strRange = fmt.Sprintf("%d-", unpackRange.Start)
|
||||
} else if unpackRange.HasEnd {
|
||||
strRange = fmt.Sprintf("-%d", unpackRange.End)
|
||||
}
|
||||
return strRange
|
||||
}
|
||||
|
||||
// ParseRange parse various styles of range such as bytes=M-N
|
||||
func ParseRange(normalizedRange string) (*UnpackedRange, error) {
|
||||
var err error
|
||||
hasStart := false
|
||||
hasEnd := false
|
||||
var start int64
|
||||
var end int64
|
||||
|
||||
// Bytes==M-N or ranges=M-N
|
||||
nrSlice := strings.Split(normalizedRange, "=")
|
||||
if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
|
||||
// Bytes=M-N,X-Y
|
||||
rSlice := strings.Split(nrSlice[1], ",")
|
||||
rStr := rSlice[0]
|
||||
|
||||
if strings.HasSuffix(rStr, "-") { // M-
|
||||
startStr := rStr[:len(rStr)-1]
|
||||
start, err = strconv.ParseInt(startStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
} else if strings.HasPrefix(rStr, "-") { // -N
|
||||
len := rStr[1:]
|
||||
end, err = strconv.ParseInt(len, 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
if end == 0 { // -0
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
} else { // M-N
|
||||
valSlice := strings.Split(rStr, "-")
|
||||
if len(valSlice) != 2 {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
start, err = strconv.ParseInt(valSlice[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasStart = true
|
||||
end, err = strconv.ParseInt(valSlice[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, InvalidRangeError(normalizedRange)
|
||||
}
|
||||
hasEnd = true
|
||||
}
|
||||
|
||||
return &UnpackedRange{hasStart, hasEnd, start, end}, nil
|
||||
}
|
||||
|
||||
// AdjustRange clamps the requested range ur to an object of the given
// size, returning half-open [start, end) byte offsets. Any out-of-bounds
// or inconsistent request falls back to the whole object [0, size).
func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
	// No range requested: the whole object.
	if ur == nil {
		return 0, size
	}

	if ur.HasStart && ur.HasEnd {
		// bytes=M-N: End is inclusive in the request, exclusive here.
		start = ur.Start
		end = ur.End + 1
		if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
			start = 0
			end = size
		}
	} else if ur.HasStart {
		// bytes=M-: from M to the end of the object.
		start = ur.Start
		end = size
		if ur.Start < 0 || ur.Start >= size {
			start = 0
		}
	} else if ur.HasEnd {
		// bytes=-N: the last N bytes of the object.
		start = size - ur.End
		end = size
		if ur.End < 0 || ur.End > size {
			start = 0
			end = size
		}
	}
	return
}
|
||||
|
||||
// GetNowSec returns the current wall-clock time as Unix time: the
// number of seconds elapsed since January 1, 1970 UTC.
func GetNowSec() int64 {
	now := time.Now()
	return now.Unix()
}
|
||||
|
||||
// GetNowNanoSec returns the current wall-clock time as Unix time in
// nanoseconds: the number of nanoseconds elapsed since January 1, 1970
// UTC. The result is undefined when the instant cannot be represented
// in an int64 (see time.Time.UnixNano).
func GetNowNanoSec() int64 {
	now := time.Now()
	return now.UnixNano()
}
|
||||
|
||||
// GetNowGMT gets the current time in GMT format.
|
||||
func GetNowGMT() string {
|
||||
return time.Now().UTC().Format(http.TimeFormat)
|
||||
}
|
||||
|
||||
// FileChunk is the file chunk definition used by the multipart helpers.
type FileChunk struct {
	Number int // Chunk number, 1-based

	Offset int64 // Chunk offset in bytes from the start of the file

	Size int64 // Chunk size in bytes
}
|
||||
|
||||
// SplitFileByPartNum splits big file into parts by the num of parts.
|
||||
// Split the file with specified parts count, returns the split result when error is nil.
|
||||
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
|
||||
if chunkNum <= 0 || chunkNum > 10000 {
|
||||
return nil, errors.New("chunkNum invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if int64(chunkNum) > stat.Size() {
|
||||
return nil, errors.New("oss: chunkNum invalid")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
var chunkN = (int64)(chunkNum)
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * (stat.Size() / chunkN)
|
||||
if i == chunkN-1 {
|
||||
chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
|
||||
} else {
|
||||
chunk.Size = stat.Size() / chunkN
|
||||
}
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// SplitFileByPartSize splits big file into parts by the size of parts.
|
||||
// Splits the file by the part size. Returns the FileChunk when error is nil.
|
||||
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
|
||||
if chunkSize <= 0 {
|
||||
return nil, errors.New("chunkSize invalid")
|
||||
}
|
||||
|
||||
file, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var chunkN = stat.Size() / chunkSize
|
||||
if chunkN >= 10000 {
|
||||
return nil, errors.New("Too many parts, please increase part size")
|
||||
}
|
||||
|
||||
var chunks []FileChunk
|
||||
var chunk = FileChunk{}
|
||||
for i := int64(0); i < chunkN; i++ {
|
||||
chunk.Number = int(i + 1)
|
||||
chunk.Offset = i * chunkSize
|
||||
chunk.Size = chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
if stat.Size()%chunkSize > 0 {
|
||||
chunk.Number = len(chunks) + 1
|
||||
chunk.Offset = int64(len(chunks)) * chunkSize
|
||||
chunk.Size = stat.Size() % chunkSize
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetPartEnd returns the inclusive end offset of a part that starts at
// begin and spans per bytes, clamped to a total length of total.
func GetPartEnd(begin int64, total int64, per int64) int64 {
	end := begin + per
	if end > total {
		end = total
	}
	return end - 1
}
|
||||
|
||||
// CrcTable returns the CRC64 table constructed from the ECMA polynomial.
var CrcTable = func() *crc64.Table {
	return crc64.MakeTable(crc64.ECMA)
}
|
||||
|
||||
// crc32Table returns the CRC32 table constructed from the IEEE
// polynomial. (Comment previously copy-pasted from CrcTable.)
var crc32Table = func() *crc32.Table {
	return crc32.MakeTable(crc32.IEEE)
}
|
||||
|
||||
// ChoiceTransferPartOption picks, from options, only the options that
// UploadPart and DownloadPart support, and returns them.
func ChoiceTransferPartOption(options []Option) []Option {
	var outOption []Option

	// Progress callback, forwarded unchanged.
	listener, _ := FindOption(options, progressListener, nil)
	if listener != nil {
		outOption = append(outOption, Progress(listener.(ProgressListener)))
	}

	// Requester-pays header.
	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	// Object version to operate on.
	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		outOption = append(outOption, VersionId(versionId.(string)))
	}

	// Bandwidth throttling; a malformed value silently becomes 0.
	trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
	if trafficLimit != nil {
		speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
		outOption = append(outOption, TrafficLimitHeader(speed))
	}

	// Destination for the raw response headers.
	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// ChoiceCompletePartOption choices valid option supported by CompleteMulitiPart
|
||||
func ChoiceCompletePartOption(options []Option) []Option {
|
||||
var outOption []Option
|
||||
|
||||
listener, _ := FindOption(options, progressListener, nil)
|
||||
if listener != nil {
|
||||
outOption = append(outOption, Progress(listener.(ProgressListener)))
|
||||
}
|
||||
|
||||
payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
|
||||
if payer != nil {
|
||||
outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
|
||||
}
|
||||
|
||||
acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
|
||||
if acl != nil {
|
||||
outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
|
||||
}
|
||||
|
||||
callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
|
||||
if callback != nil {
|
||||
outOption = append(outOption, Callback(callback.(string)))
|
||||
}
|
||||
|
||||
callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
|
||||
if callbackVar != nil {
|
||||
outOption = append(outOption, CallbackVar(callbackVar.(string)))
|
||||
}
|
||||
|
||||
respHeader, _ := FindOption(options, responseHeader, nil)
|
||||
if respHeader != nil {
|
||||
outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
|
||||
}
|
||||
|
||||
forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
|
||||
if forbidOverWrite != nil {
|
||||
if forbidOverWrite.(string) == "true" {
|
||||
outOption = append(outOption, ForbidOverWrite(true))
|
||||
} else {
|
||||
outOption = append(outOption, ForbidOverWrite(false))
|
||||
}
|
||||
}
|
||||
|
||||
return outOption
|
||||
}
|
||||
|
||||
// ChoiceAbortPartOption picks, from options, only the options that
// AbortMultipartUpload supports, and returns them.
func ChoiceAbortPartOption(options []Option) []Option {
	var outOption []Option
	// Requester-pays header.
	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	// Destination for the raw response headers.
	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// ChoiceHeadObjectOption picks, from options, only the options that
// HeadObject supports, and returns them.
func ChoiceHeadObjectOption(options []Option) []Option {
	var outOption []Option

	// not select HTTPHeaderRange to get whole object length
	payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
	if payer != nil {
		outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
	}

	// Object version to operate on.
	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		outOption = append(outOption, VersionId(versionId.(string)))
	}

	// Destination for the raw response headers.
	respHeader, _ := FindOption(options, responseHeader, nil)
	if respHeader != nil {
		outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
	}

	return outOption
}
|
||||
|
||||
// CheckBucketName validates an OSS bucket name: it must be 3-63
// characters long, consist only of lowercase letters, digits and '-',
// and must not begin or end with '-'.
func CheckBucketName(bucketName string) error {
	nameLen := len(bucketName)
	if nameLen < 3 || nameLen > 63 {
		return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, nameLen)
	}

	for _, v := range bucketName {
		isLowerLetter := v >= 'a' && v <= 'z'
		isDigit := v >= '0' && v <= '9'
		if !isLowerLetter && !isDigit && v != '-' {
			return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
		}
	}
	if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
		return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
	}
	return nil
}
|
||||
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *bytes.Buffer:
|
||||
contentLength = int64(v.Len())
|
||||
case *bytes.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *strings.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *os.File:
|
||||
fInfo, fError := v.Stat()
|
||||
if fError != nil {
|
||||
err = fmt.Errorf("can't get reader content length,%s", fError.Error())
|
||||
} else {
|
||||
contentLength = fInfo.Size()
|
||||
}
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *LimitedReadCloser:
|
||||
contentLength = int64(v.N)
|
||||
default:
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
||||
|
||||
func LimitReadCloser(r io.Reader, n int64) io.Reader {
|
||||
var lc LimitedReadCloser
|
||||
lc.R = r
|
||||
lc.N = n
|
||||
return &lc
|
||||
}
|
||||
|
||||
// LimitedReadCloser is an io.LimitedReader that additionally supports
// Close(), forwarding it to the wrapped reader when possible.
type LimitedReadCloser struct {
	io.LimitedReader
}
|
||||
|
||||
func (lc *LimitedReadCloser) Close() error {
|
||||
if closer, ok := lc.R.(io.ReadCloser); ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiscardReadCloser wraps RC and silently drops the first Discard bytes
// read from it before handing data to the caller.
type DiscardReadCloser struct {
	RC io.ReadCloser // underlying source

	Discard int // number of leading bytes still to be skipped
}
|
||||
|
||||
// Read reads from the underlying reader, consuming up to drc.Discard
// leading bytes before returning data to the caller. While the discard
// window is being drained a call may return (0, nil) even though the
// underlying reader made progress.
func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
	n, err := drc.RC.Read(b)
	// Nothing left to discard, or nothing read: pass through unchanged.
	if drc.Discard == 0 || n <= 0 {
		return n, err
	}

	// The whole read still falls inside the discard window.
	if n <= drc.Discard {
		drc.Discard -= n
		return 0, err
	}

	// Part of the read survives: shift the kept bytes to the front of b.
	realLen := n - drc.Discard
	copy(b[0:realLen], b[drc.Discard:n])
	drc.Discard = 0
	return realLen, err
}
|
||||
|
||||
func (drc *DiscardReadCloser) Close() error {
|
||||
closer, ok := drc.RC.(io.ReadCloser)
|
||||
if ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConvertEmptyValueToNil rewrites params in place: each listed key that
// is present with an empty-string value is set to nil. Missing keys and
// non-empty values are left untouched.
func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
	for _, key := range keys {
		if value, ok := params[key]; ok && value == "" {
			params[key] = nil
		}
	}
}
|
||||
|
||||
// EscapeLFString returns str with every linefeed replaced by the two
// characters `\n`, so the result is safe to emit on a single log line.
// The hand-rolled byte loop is replaced by the equivalent stdlib call.
func EscapeLFString(str string) string {
	return strings.Replace(str, "\n", "\\n", -1)
}
|
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -0,0 +1,3 @@
|
||||
AWS SDK for Go
|
||||
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
Copyright 2014-2015 Stripe, Inc.
|
@ -0,0 +1,93 @@
|
||||
// Package arn provides a parser for interacting with Amazon Resource Names.
|
||||
package arn
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	arnDelimiter = ":"
	arnSections  = 6
	arnPrefix    = "arn:"

	// zero-indexed positions of each field within the colon-separated ARN
	sectionPartition = 1
	sectionService   = 2
	sectionRegion    = 3
	sectionAccountID = 4
	sectionResource  = 5

	// error messages returned by Parse
	invalidPrefix   = "arn: invalid prefix"
	invalidSections = "arn: not enough sections"
)
|
||||
|
||||
// ARN captures the individual fields of an Amazon Resource Name.
// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
type ARN struct {
	// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
	// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
	// (Beijing) region is "aws-cn".
	Partition string

	// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
	// namespaces, see
	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
	Service string

	// The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
	// component might be omitted.
	Region string

	// The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
	// ARNs for some resources don't require an account number, so this component might be omitted.
	AccountID string

	// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource -
	// for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
	// resource name itself. Some services allow paths for resource names, as described in
	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
	Resource string
}
|
||||
|
||||
// Parse parses an ARN into its constituent parts.
|
||||
//
|
||||
// Some example ARNs:
|
||||
// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
|
||||
// arn:aws:iam::123456789012:user/David
|
||||
// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
|
||||
// arn:aws:s3:::my_corporate_bucket/exampleobject.png
|
||||
func Parse(arn string) (ARN, error) {
|
||||
if !strings.HasPrefix(arn, arnPrefix) {
|
||||
return ARN{}, errors.New(invalidPrefix)
|
||||
}
|
||||
sections := strings.SplitN(arn, arnDelimiter, arnSections)
|
||||
if len(sections) != arnSections {
|
||||
return ARN{}, errors.New(invalidSections)
|
||||
}
|
||||
return ARN{
|
||||
Partition: sections[sectionPartition],
|
||||
Service: sections[sectionService],
|
||||
Region: sections[sectionRegion],
|
||||
AccountID: sections[sectionAccountID],
|
||||
Resource: sections[sectionResource],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsARN returns whether the given string is an ARN by looking for
|
||||
// whether the string starts with "arn:" and contains the correct number
|
||||
// of sections delimited by colons(:).
|
||||
func IsARN(arn string) bool {
|
||||
return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
|
||||
}
|
||||
|
||||
// String returns the canonical representation of the ARN
|
||||
func (arn ARN) String() string {
|
||||
return arnPrefix +
|
||||
arn.Partition + arnDelimiter +
|
||||
arn.Service + arnDelimiter +
|
||||
arn.Region + arnDelimiter +
|
||||
arn.AccountID + arnDelimiter +
|
||||
arn.Resource
|
||||
}
|
@ -0,0 +1,164 @@
|
||||
// Package awserr represents API error interface accessors for the SDK.
|
||||
package awserr
|
||||
|
||||
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if awsErr, ok := err.(awserr.Error); ok {
//             // Get error details
//             log.Println("Error:", awsErr.Code(), awsErr.Message())
//
//             // Prints out full error message, including original error if there was one.
//             log.Println("Error:", awsErr.Error())
//
//             // Get original error
//             if origErr := awsErr.OrigErr(); origErr != nil {
//                 // operate on original error.
//             }
//         } else {
//             fmt.Println(err.Error())
//         }
//     }
//
type Error interface {
	// Satisfy the generic error interface.
	error

	// Returns the short phrase depicting the classification of the error.
	Code() string

	// Returns the error details message.
	Message() string

	// Returns the original error if one was set. Nil is returned if not set.
	OrigErr() error
}
|
||||
|
||||
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
	// Satisfy the generic error interface.
	error

	// Returns the short phrase depicting the classification of the error.
	Code() string

	// Returns the error details message.
	Message() string

	// Returns the original errors if any were set. Nil is returned if not set.
	OrigErrs() []error
}
|
||||
|
||||
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError.
type BatchedErrors interface {
	// Satisfy the base Error interface.
	Error

	// OrigErrs returns the original errors if any were set. An empty slice is
	// returned if none were set.
	OrigErrs() []error
}
|
||||
|
||||
// New returns an Error object described by the code, message, and origErr.
|
||||
//
|
||||
// If origErr satisfies the Error interface it will not be wrapped within a new
|
||||
// Error object and will instead be returned.
|
||||
func New(code, message string, origErr error) Error {
|
||||
var errs []error
|
||||
if origErr != nil {
|
||||
errs = append(errs, origErr)
|
||||
}
|
||||
return newBaseError(code, message, errs)
|
||||
}
|
||||
|
||||
// NewBatchError returns an BatchedErrors with a collection of errors as an
|
||||
// array of errors.
|
||||
func NewBatchError(code, message string, errs []error) BatchedErrors {
|
||||
return newBaseError(code, message, errs)
|
||||
}
|
||||
|
||||
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
//     output, err := s3manage.Upload(svc, input, opts)
//     if err != nil {
//         if reqerr, ok := err.(RequestFailure); ok {
//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
//         } else {
//             log.Println("Error:", err.Error())
//         }
//     }
//
// Combined with awserr.Error:
//
//    output, err := s3manage.Upload(svc, input, opts)
//    if err != nil {
//        if awsErr, ok := err.(awserr.Error); ok {
//            // Generic AWS Error with Code, Message, and original error (if any)
//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
//            if reqErr, ok := err.(awserr.RequestFailure); ok {
//                // A service error occurred
//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
//            }
//        } else {
//            fmt.Println(err.Error())
//        }
//    }
//
type RequestFailure interface {
	Error

	// StatusCode returns the status code of the HTTP response.
	StatusCode() int

	// RequestID returns the request ID returned by the service for a request
	// failure. This will be empty if no request ID is available such as the
	// request failed due to a connection error.
	RequestID() string
}
|
||||
|
||||
// NewRequestFailure returns a wrapped error with additional information for
|
||||
// request status code, and service requestID.
|
||||
//
|
||||
// Should be used to wrap all request which involve service requests. Even if
|
||||
// the request failed without a service response, but had an HTTP status code
|
||||
// that may be meaningful.
|
||||
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
|
||||
return newRequestError(err, statusCode, reqID)
|
||||
}
|
||||
|
||||
// UnmarshalError provides the interface for the SDK failing to unmarshal data.
type UnmarshalError interface {
	awsError
	// Bytes returns the raw bytes that failed to unmarshal.
	Bytes() []byte
}
|
||||
|
||||
// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
|
||||
// the bytes that fail to unmarshal to the error.
|
||||
func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
|
||||
return &unmarshalError{
|
||||
awsError: New("UnmarshalError", msg, err),
|
||||
bytes: bytes,
|
||||
}
|
||||
}
|
@ -0,0 +1,221 @@
|
||||
package awserr
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// SprintError returns a formatted "code: message" string for the error.
//
// Both extra and origErr are optional. When present, extra is appended on an
// indented line and origErr on a trailing "caused by:" line; when absent the
// corresponding line is omitted entirely.
func SprintError(code, message, extra string, origErr error) string {
	msg := code + ": " + message
	if extra != "" {
		msg += "\n\t" + extra
	}
	if origErr != nil {
		msg += "\ncaused by: " + origErr.Error()
	}
	return msg
}
|
||||
|
||||
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
	// Classification of error: short, whitespace-free phrase.
	code string

	// Detailed, human-readable information about the error.
	message string

	// Optional original error this error is based off of. Allows building
	// chained errors. May be empty, hold a single cause, or a batch.
	errs []error
}
|
||||
|
||||
// newBaseError returns an error object for the code, message, and errors.
|
||||
//
|
||||
// code is a short no whitespace phrase depicting the classification of
|
||||
// the error that is being created.
|
||||
//
|
||||
// message is the free flow string containing detailed information about the
|
||||
// error.
|
||||
//
|
||||
// origErrs is the error objects which will be nested under the new errors to
|
||||
// be returned.
|
||||
func newBaseError(code, message string, origErrs []error) *baseError {
|
||||
b := &baseError{
|
||||
code: code,
|
||||
message: message,
|
||||
errs: origErrs,
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
//
|
||||
// See ErrorWithExtra for formatting.
|
||||
//
|
||||
// Satisfies the error interface.
|
||||
func (b baseError) Error() string {
|
||||
size := len(b.errs)
|
||||
if size > 0 {
|
||||
return SprintError(b.code, b.message, "", errorList(b.errs))
|
||||
}
|
||||
|
||||
return SprintError(b.code, b.message, "", nil)
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
	return b.Error()
}

// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
	return b.code
}

// Message returns the error details message.
func (b baseError) Message() string {
	return b.message
}
|
||||
|
||||
// OrigErr returns the original error if one was set. Nil is returned if no
|
||||
// error was set. This only returns the first element in the list. If the full
|
||||
// list is needed, use BatchedErrors.
|
||||
func (b baseError) OrigErr() error {
|
||||
switch len(b.errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return b.errs[0]
|
||||
default:
|
||||
if err, ok := b.errs[0].(Error); ok {
|
||||
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
|
||||
}
|
||||
return NewBatchError("BatchedErrors",
|
||||
"multiple errors occurred", b.errs)
|
||||
}
|
||||
}
|
||||
|
||||
// OrigErrs returns the original errors if any were set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
	return b.errs
}
|
||||
|
||||
// awsError aliases the Error interface so it can be embedded as an anonymous
// field in the requestError struct without conflicting with error.Error().
type awsError Error
|
||||
|
||||
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
	awsError
	// HTTP status code of the failed response (0 if none was received).
	statusCode int
	// Service-assigned request ID, empty when the request never reached
	// the service (e.g. connection error).
	requestID string
	// NOTE(review): no method in this file reads this field — it appears
	// unused by requestError; confirm before relying on it.
	bytes []byte
}
|
||||
|
||||
// newRequestError returns a wrapped error with additional information for
|
||||
// request status code, and service requestID.
|
||||
//
|
||||
// Should be used to wrap all request which involve service requests. Even if
|
||||
// the request failed without a service response, but had an HTTP status code
|
||||
// that may be meaningful.
|
||||
//
|
||||
// Also wraps original errors via the baseError.
|
||||
func newRequestError(err Error, statusCode int, requestID string) *requestError {
|
||||
return &requestError{
|
||||
awsError: err,
|
||||
statusCode: statusCode,
|
||||
requestID: requestID,
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// Satisfies the error interface.
|
||||
func (r requestError) Error() string {
|
||||
extra := fmt.Sprintf("status code: %d, request id: %s",
|
||||
r.statusCode, r.requestID)
|
||||
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
	return r.Error()
}

// StatusCode returns the wrapped status code for the error.
func (r requestError) StatusCode() int {
	return r.statusCode
}

// RequestID returns the wrapped requestID.
func (r requestError) RequestID() string {
	return r.requestID
}
|
||||
|
||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
||||
// returned if no error was set.
|
||||
func (r requestError) OrigErrs() []error {
|
||||
if b, ok := r.awsError.(BatchedErrors); ok {
|
||||
return b.OrigErrs()
|
||||
}
|
||||
return []error{r.OrigErr()}
|
||||
}
|
||||
|
||||
// unmarshalError wraps an error raised while unmarshaling data, retaining
// the raw bytes that failed to decode for diagnostics.
type unmarshalError struct {
	awsError
	// The raw payload that could not be unmarshaled.
	bytes []byte
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// Satisfies the error interface.
|
||||
func (e unmarshalError) Error() string {
|
||||
extra := hex.Dump(e.bytes)
|
||||
return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (e unmarshalError) String() string {
	return e.Error()
}

// Bytes returns the bytes that failed to unmarshal.
func (e unmarshalError) Bytes() []byte {
	return e.bytes
}
|
||||
|
||||
// errorList is a list of errors that itself satisfies the error interface.
type errorList []error

// Error returns the string representation of the error list: each element's
// message joined by newlines. An empty list yields the empty string.
//
// Satisfies the error interface.
func (e errorList) Error() string {
	msg := ""
	for i, err := range e {
		// Separator goes between elements only — no trailing newline, so
		// existing callers relying on the exact format are unaffected.
		if i > 0 {
			msg += "\n"
		}
		msg += err.Error()
	}
	return msg
}
|
@ -0,0 +1,108 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Copy deeply copies a src structure to dst. Useful for copying request and
|
||||
// response structures.
|
||||
//
|
||||
// Can copy between structs of different type, but will only copy fields which
|
||||
// are assignable, and exist in both structs. Fields which are not assignable,
|
||||
// or do not exist in both structs are ignored.
|
||||
func Copy(dst, src interface{}) {
|
||||
dstval := reflect.ValueOf(dst)
|
||||
if !dstval.IsValid() {
|
||||
panic("Copy dst cannot be nil")
|
||||
}
|
||||
|
||||
rcopy(dstval, reflect.ValueOf(src), true)
|
||||
}
|
||||
|
||||
// CopyOf returns a copy of src while also allocating the memory for dst.
|
||||
// src must be a pointer type or this operation will fail.
|
||||
func CopyOf(src interface{}) (dst interface{}) {
|
||||
dsti := reflect.New(reflect.TypeOf(src).Elem())
|
||||
dst = dsti.Interface()
|
||||
rcopy(dsti, reflect.ValueOf(src), true)
|
||||
return
|
||||
}
|
||||
|
||||
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
	if !src.IsValid() {
		// Zero/invalid source: nothing to copy.
		return
	}

	switch src.Kind() {
	case reflect.Ptr:
		if _, ok := src.Interface().(io.Reader); ok {
			// io.Reader values are shared, not deep-copied: copying a
			// stream would consume it.
			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
				dst.Elem().Set(src)
			} else if dst.CanSet() {
				dst.Set(src)
			}
		} else {
			e := src.Type().Elem()
			if dst.CanSet() && !src.IsNil() {
				if _, ok := src.Interface().(*time.Time); !ok {
					// Allocate a fresh pointee; its contents are filled by
					// the recursive call below.
					dst.Set(reflect.New(e))
				} else {
					tempValue := reflect.New(e)
					tempValue.Elem().Set(src.Elem())
					// Sets time.Time's unexported values
					dst.Set(tempValue)
				}
			}
			if src.Elem().IsValid() {
				// Keep the current root state since the depth hasn't changed
				rcopy(dst.Elem(), src.Elem(), root)
			}
		}
	case reflect.Struct:
		// Copy field-by-field, matched by name; fields missing from dst or
		// not settable are silently skipped.
		t := dst.Type()
		for i := 0; i < t.NumField(); i++ {
			name := t.Field(i).Name
			srcVal := src.FieldByName(name)
			dstVal := dst.FieldByName(name)
			if srcVal.IsValid() && dstVal.CanSet() {
				rcopy(dstVal, srcVal, false)
			}
		}
	case reflect.Slice:
		if src.IsNil() {
			// nil slices stay nil in dst.
			break
		}

		// Fresh backing array so dst does not alias src's elements.
		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
		dst.Set(s)
		for i := 0; i < src.Len(); i++ {
			rcopy(dst.Index(i), src.Index(i), false)
		}
	case reflect.Map:
		if src.IsNil() {
			// nil maps stay nil in dst.
			break
		}

		s := reflect.MakeMap(src.Type())
		dst.Set(s)
		for _, k := range src.MapKeys() {
			v := src.MapIndex(k)
			v2 := reflect.New(v.Type()).Elem()
			rcopy(v2, v, false)
			dst.SetMapIndex(k, v2)
		}
	default:
		// Assign the value if possible. If its not assignable, the value would
		// need to be converted and the impact of that may be unexpected, or is
		// not compatible with the dst type.
		if src.Type().AssignableTo(dst.Type()) {
			dst.Set(src)
		}
	}
}
|
@ -0,0 +1,27 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
	ra := reflect.Indirect(reflect.ValueOf(a))
	rb := reflect.Indirect(reflect.ValueOf(b))

	aValid := ra.IsValid()
	bValid := rb.IsValid()
	if !aValid && !bValid {
		// Both nil: equal only when their (possibly nil) types match.
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	}
	if aValid != bValid {
		// Exactly one side is nil — never equal.
		return false
	}

	return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
|
@ -0,0 +1,221 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/jmespath/go-jmespath"
|
||||
)
|
||||
|
||||
// indexRe matches a trailing index expression on a path component, e.g.
// "Foo[2]", "Foo[-1]", or "Foo[]" (capture 2 empty means "all indices").
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)

// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
//
// path is a dot-separated field path; "||" separates fallback alternatives
// (first alternative producing values wins). createPath allocates nil
// pointers along the way, caseSensitive controls field-name matching, and
// nilTerm returns the terminal pointer itself (zeroed) so callers can set it
// to nil.
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
	pathparts := strings.Split(path, "||")
	if len(pathparts) > 1 {
		for _, pathpart := range pathparts {
			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
			if len(vals) > 0 {
				return vals
			}
		}
		return nil
	}

	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
	components := strings.Split(path, ".")
	for len(values) > 0 && len(components) > 0 {
		var index *int64
		var indexStar bool
		c := strings.TrimSpace(components[0])
		if c == "" { // no actual component, illegal syntax
			return nil
		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
			// TODO normalize case for user
			return nil // don't support unexported fields
		}

		// parse this component: strip any trailing [i] / [] index expression
		if m := indexRe.FindStringSubmatch(c); m != nil {
			c = m[1]
			if m[2] == "" {
				index = nil
				indexStar = true
			} else {
				i, _ := strconv.ParseInt(m[2], 10, 32)
				index = &i
				indexStar = false
			}
		}

		nextvals := []reflect.Value{}
		for _, value := range values {
			// pull component name out of struct member
			if value.Kind() != reflect.Struct {
				continue
			}

			if c == "*" { // pull all members
				for i := 0; i < value.NumField(); i++ {
					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
						nextvals = append(nextvals, f)
					}
				}
				continue
			}

			value = value.FieldByNameFunc(func(name string) bool {
				if c == name {
					return true
				} else if !caseSensitive && strings.EqualFold(name, c) {
					return true
				}
				return false
			})

			// Terminal component with nilTerm: zero the pointer in place and
			// hand it back so the caller can assign nil through it.
			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
				if !value.IsNil() {
					value.Set(reflect.Zero(value.Type()))
				}
				return []reflect.Value{value}
			}

			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
				// TODO if the value is the terminus it should not be created
				// if the value to be set to its position is nil.
				value.Set(reflect.New(value.Type().Elem()))
				value = value.Elem()
			} else {
				value = reflect.Indirect(value)
			}

			// Drop nil slices/maps when not creating the path.
			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
				if !createPath && value.IsNil() {
					value = reflect.ValueOf(nil)
				}
			}

			if value.IsValid() {
				nextvals = append(nextvals, value)
			}
		}
		values = nextvals

		// Apply the [i] / [] index expression, if the component had one.
		if indexStar || index != nil {
			nextvals = []reflect.Value{}
			for _, valItem := range values {
				value := reflect.Indirect(valItem)
				if value.Kind() != reflect.Slice {
					continue
				}

				if indexStar { // grab all indices
					for i := 0; i < value.Len(); i++ {
						idx := reflect.Indirect(value.Index(i))
						if idx.IsValid() {
							nextvals = append(nextvals, idx)
						}
					}
					continue
				}

				// pull out index
				i := int(*index)
				if i >= value.Len() { // check out of bounds
					if createPath {
						// TODO resize slice
					} else {
						continue
					}
				} else if i < 0 { // support negative indexing
					i = value.Len() + i
				}
				value = reflect.Indirect(value.Index(i))

				// Drop nil slices/maps when not creating the path.
				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
					if !createPath && value.IsNil() {
						value = reflect.ValueOf(nil)
					}
				}

				if value.IsValid() {
					nextvals = append(nextvals, value)
				}
			}
			values = nextvals
		}

		components = components[1:]
	}
	return values
}
|
||||
|
||||
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure. The path is evaluated as a JMESPath
// expression; the search result is normalized into a []interface{}.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
	result, err := jmespath.Search(path, i)
	if err != nil {
		return nil, err
	}

	v := reflect.ValueOf(result)
	// No match (or nil pointer result) yields no values and no error.
	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil, nil
	}
	// Fast path: jmespath already produced the slice shape we return.
	if s, ok := result.([]interface{}); ok {
		return s, err
	}
	if v.Kind() == reflect.Map && v.Len() == 0 {
		return nil, nil
	}
	// Any other slice type is unpacked element-by-element.
	if v.Kind() == reflect.Slice {
		out := make([]interface{}, v.Len())
		for i := 0; i < v.Len(); i++ {
			out[i] = v.Index(i).Interface()
		}
		return out, nil
	}

	// Scalar result: wrap in a single-element slice.
	return []interface{}{result}, nil
}
|
||||
|
||||
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
||||
// of a structure.
|
||||
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
||||
rvals := rValuesAtPath(i, path, true, false, v == nil)
|
||||
for _, rval := range rvals {
|
||||
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
||||
continue
|
||||
}
|
||||
setValue(rval, v)
|
||||
}
|
||||
}
|
||||
|
||||
func setValue(dstVal reflect.Value, src interface{}) {
|
||||
if dstVal.Kind() == reflect.Ptr {
|
||||
dstVal = reflect.Indirect(dstVal)
|
||||
}
|
||||
srcVal := reflect.ValueOf(src)
|
||||
|
||||
if !srcVal.IsValid() { // src is literal nil
|
||||
if dstVal.CanAddr() {
|
||||
// Convert to pointer so that pointer's value can be nil'ed
|
||||
// dstVal = dstVal.Addr()
|
||||
}
|
||||
dstVal.Set(reflect.Zero(dstVal.Type()))
|
||||
|
||||
} else if srcVal.Kind() == reflect.Ptr {
|
||||
if srcVal.IsNil() {
|
||||
srcVal = reflect.Zero(dstVal.Type())
|
||||
} else {
|
||||
srcVal = reflect.ValueOf(src).Elem()
|
||||
}
|
||||
dstVal.Set(srcVal)
|
||||
} else {
|
||||
dstVal.Set(srcVal)
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,123 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Prettify returns the string representation of a value.
|
||||
func Prettify(i interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
prettify(reflect.ValueOf(i), 0, &buf)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// prettify will recursively walk value v to build a textual
// representation of the value, written into buf at the given indent depth.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the kind.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		strtype := v.Type().String()
		if strtype == "time.Time" {
			// time.Time prints via its own formatting, not field-by-field.
			fmt.Fprintf(buf, "%s", v.Interface())
			break
		} else if strings.HasPrefix(strtype, "io.") {
			// Streams are opaque; don't attempt to render their contents.
			buf.WriteString("<buffer>")
			break
		}

		buf.WriteString("{\n")

		// First pass: collect printable field names (exported and non-nil).
		names := []string{}
		for i := 0; i < v.Type().NumField(); i++ {
			name := v.Type().Field(i).Name
			f := v.Field(i)
			if name[0:1] == strings.ToLower(name[0:1]) {
				continue // ignore unexported fields
			}
			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
				continue // ignore unset fields
			}
			names = append(names, name)
		}

		// Second pass: render each field, honoring the `sensitive` tag.
		for i, n := range names {
			val := v.FieldByName(n)
			ft, ok := v.Type().FieldByName(n)
			if !ok {
				panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
			}

			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(n + ": ")

			if tag := ft.Tag.Get("sensitive"); tag == "true" {
				buf.WriteString("<sensitive>")
			} else {
				prettify(val, indent+2, buf)
			}

			if i < len(names)-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		strtype := v.Type().String()
		if strtype == "[]uint8" {
			// Byte slices are summarized rather than dumped.
			fmt.Fprintf(buf, "<binary> len %d", v.Len())
			break
		}

		// Short slices stay on one line; longer ones get one element per line.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			prettify(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE: map iteration order is not deterministic in Go, so repeated
		// calls may render keys in different orders.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			prettify(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		if !v.IsValid() {
			fmt.Fprint(buf, "<invalid value>")
			return
		}
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		case io.ReadSeeker, io.Reader:
			format = "buffer(%p)"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
|
@ -0,0 +1,90 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StringValue returns the string representation of a value.
|
||||
//
|
||||
// Deprecated: Use Prettify instead.
|
||||
func StringValue(i interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
stringValue(reflect.ValueOf(i), 0, &buf)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// stringValue recursively renders v into buf at the given indent depth.
// Deprecated alongside StringValue; Prettify supersedes this format.
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the kind.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		buf.WriteString("{\n")

		for i := 0; i < v.Type().NumField(); i++ {
			ft := v.Type().Field(i)
			fv := v.Field(i)

			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
				continue // ignore unexported fields
			}
			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
				continue // ignore unset fields
			}

			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(ft.Name + ": ")

			if tag := ft.Tag.Get("sensitive"); tag == "true" {
				buf.WriteString("<sensitive>")
			} else {
				stringValue(fv, indent+2, buf)
			}

			// NOTE(review): a ",\n" is emitted after EVERY field (including
			// the last), unlike prettify — kept as-is since this deprecated
			// format may be relied upon.
			buf.WriteString(",\n")
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		// Short slices stay on one line; longer ones get one element per line.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			stringValue(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE: map iteration order is not deterministic in Go.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			stringValue(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
|
@ -0,0 +1,94 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// A Config provides configuration to a service client instance.
type Config struct {
	// Base SDK configuration the client was built from.
	Config *aws.Config
	// Request lifecycle handlers for this client.
	Handlers request.Handlers
	// Partition the resolved endpoint belongs to (e.g. "aws") —
	// presumably set by endpoint resolution; confirm against resolver.
	PartitionID string
	// Resolved endpoint URL the client will send requests to.
	Endpoint string
	// Region used when signing requests.
	SigningRegion string
	// Service name used when signing requests.
	SigningName string
	// ResolvedRegion is the region the endpoint was resolved for —
	// may differ from the configured region; confirm against resolver.
	ResolvedRegion string

	// States that the signing name did not come from a modeled source but
	// was derived based on other data. Used by service client constructors
	// to determine if the signin name can be overridden based on metadata the
	// service has.
	SigningNameDerived bool
}
|
||||
|
||||
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
	// ClientConfig returns the client configuration for the named service,
	// optionally merged with additional aws.Configs.
	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
|
||||
|
||||
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
// resolve the endpoint automatically. The service client's endpoint must be
// provided via the aws.Config.Endpoint field.
type ConfigNoResolveEndpointProvider interface {
	// ClientConfigNoResolveEndpoint returns the client configuration without
	// performing endpoint resolution.
	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
}
|
||||
|
||||
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
	// Retry policy for failed requests.
	request.Retryer
	// Static metadata describing the service (name, API version, etc.).
	metadata.ClientInfo

	// Configuration the client was constructed with.
	Config aws.Config
	// Request lifecycle handlers; a private copy per client (see New).
	Handlers request.Handlers
}
|
||||
|
||||
// New will return a pointer to a new initialized service client.
//
// The retryer is chosen as follows: a cfg.Retryer that implements
// request.Retryer is used directly; otherwise a DefaultRetryer is installed
// (logging a warning when a non-conforming retryer was supplied). The
// options funcs run last and may override anything set here.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
	svc := &Client{
		Config:     cfg,
		ClientInfo: info,
		// Copy so per-client handler mutations don't leak into the shared set.
		Handlers: handlers.Copy(),
	}

	switch retryer, ok := cfg.Retryer.(request.Retryer); {
	case ok:
		svc.Retryer = retryer
	case cfg.Retryer != nil && cfg.Logger != nil:
		// A retryer was configured but does not satisfy request.Retryer:
		// warn, then fall through to install the default retryer.
		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
		cfg.Logger.Log(s)
		fallthrough
	default:
		maxRetries := aws.IntValue(cfg.MaxRetries)
		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
			maxRetries = DefaultRetryerMaxNumRetries
		}
		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
	}

	svc.AddDebugHandlers()

	for _, option := range options {
		option(svc)
	}

	return svc
}
|
||||
|
||||
// NewRequest returns a new Request pointer for the service API
// operation and parameters, wired to this client's config, handlers,
// and retryer.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
|
||||
|
||||
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information. The request logger runs before all other Send handlers,
// and the response logger after them.
func (c *Client) AddDebugHandlers() {
	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
|
@ -0,0 +1,177 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
)
|
||||
|
||||
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
type DefaultRetryer struct {
	// NumMaxRetries is the number of max retries that will be performed.
	// By default, this is zero.
	NumMaxRetries int

	// MinRetryDelay is the minimum retry delay after which retry will be performed.
	// If not set, the value is 0ns.
	MinRetryDelay time.Duration

	// MinThrottleDelay is the minimum retry delay when throttled.
	// If not set, the value is 0ns.
	MinThrottleDelay time.Duration

	// MaxRetryDelay is the maximum retry delay before which retry must be performed.
	// If not set, the value is 0ns.
	// NOTE(review): setRetryerDefaults appears to fill zero values with the
	// package defaults — confirm whether "0ns" is ever observed in practice.
	MaxRetryDelay time.Duration

	// MaxThrottleDelay is the maximum retry delay when throttled.
	// If not set, the value is 0ns.
	MaxThrottleDelay time.Duration
}
|
||||
|
||||
// Default values used for DefaultRetryer. The delay constants backfill
// zero-valued DefaultRetryer fields (see setRetryerDefaults).
const (
	// DefaultRetryerMaxNumRetries sets maximum number of retries
	DefaultRetryerMaxNumRetries = 3

	// DefaultRetryerMinRetryDelay sets minimum retry delay
	DefaultRetryerMinRetryDelay = 30 * time.Millisecond

	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond

	// DefaultRetryerMaxRetryDelay sets maximum retry delay
	DefaultRetryerMaxRetryDelay = 300 * time.Second

	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
	DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
|
||||
|
||||
// MaxRetries returns the number of maximum retries the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
	return d.NumMaxRetries
}
|
||||
|
||||
// setRetryerDefaults sets the default values of the retryer if not set
|
||||
func (d *DefaultRetryer) setRetryerDefaults() {
|
||||
if d.MinRetryDelay == 0 {
|
||||
d.MinRetryDelay = DefaultRetryerMinRetryDelay
|
||||
}
|
||||
if d.MaxRetryDelay == 0 {
|
||||
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
|
||||
}
|
||||
if d.MinThrottleDelay == 0 {
|
||||
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
|
||||
}
|
||||
if d.MaxThrottleDelay == 0 {
|
||||
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
|
||||
}
|
||||
}
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again.
//
// The delay is exponential backoff with jitter: roughly
// (1 << RetryCount) * jitter(minDelay), capped at maxDelay. Throttle errors
// use the throttle min/max bounds and additionally honor a server-supplied
// Retry-After delay, which is added on top of the computed backoff.
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {

	// if number of max retries is zero, no retries will be performed.
	if d.NumMaxRetries == 0 {
		return 0
	}

	// Sets default value for retryer members
	d.setRetryerDefaults()

	// minDelay is the minimum retryer delay
	minDelay := d.MinRetryDelay

	// initialDelay holds any server-requested Retry-After delay; it stays 0
	// for non-throttle errors or when the header is absent/unusable.
	var initialDelay time.Duration

	isThrottle := r.IsErrorThrottle()
	if isThrottle {
		if delay, ok := getRetryAfterDelay(r); ok {
			initialDelay = delay
		}
		minDelay = d.MinThrottleDelay
	}

	retryCount := r.RetryCount

	// maxDelay the maximum retryer delay
	maxDelay := d.MaxRetryDelay

	if isThrottle {
		maxDelay = d.MaxThrottleDelay
	}

	var delay time.Duration

	// Logic to cap the retry count based on the minDelay provided.
	// actualRetryCount approximates the bit-width of minDelay; the
	// comparison against 63-retryCount appears intended to keep the
	// (1 << retryCount) * jitter product within int64 range — when the
	// shift would overflow, fall back to jitter around maxDelay/2.
	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
	if actualRetryCount < 63-retryCount {
		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
		if delay > maxDelay {
			delay = getJitterDelay(maxDelay / 2)
		}
	} else {
		delay = getJitterDelay(maxDelay / 2)
	}
	return delay + initialDelay
}
|
||||
|
||||
// getJitterDelay returns a jittered delay for retry: a uniformly random
// duration in [duration, 2*duration).
//
// NOTE(review): assumes duration > 0 — Int63n panics for n <= 0. Callers in
// this file pass defaults backfilled by setRetryerDefaults, which are non-zero.
func getJitterDelay(duration time.Duration) time.Duration {
	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
}
|
||||
|
||||
// ShouldRetry returns true if the request should be retried.
|
||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||
|
||||
// ShouldRetry returns false if number of max retries is 0.
|
||||
if d.NumMaxRetries == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable != nil {
|
||||
return *r.Retryable
|
||||
}
|
||||
return r.IsErrorRetryable() || r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
// This will look in the Retry-After header, RFC 7231, for how long
|
||||
// it will wait before attempting another request
|
||||
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
|
||||
if !canUseRetryAfterHeader(r) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delayStr := r.HTTPResponse.Header.Get("Retry-After")
|
||||
if len(delayStr) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delay, err := strconv.Atoi(delayStr)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return time.Duration(delay) * time.Second, true
|
||||
}
|
||||
|
||||
// Will look at the status code to see if the retry header pertains to
|
||||
// the status code.
|
||||
func canUseRetryAfterHeader(r *request.Request) bool {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 503:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
@ -0,0 +1,206 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http/httputil"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// logReqMsg is the format for a successfully dumped, post-signing request.
// Verbs: service name, operation name, request dump.
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`

// logReqErrMsg is the format used when dumping the request itself fails.
// Verbs: service name, operation name, error.
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
---[ REQUEST DUMP ERROR ]-----------------------------
%s
------------------------------------------------------`
|
||||
|
||||
// logWriter is an io.Writer that captures everything written to it in an
// in-memory buffer, so a response body teed through it can be logged later.
type logWriter struct {
	// Logger is what we will use to log the payload of a response.
	Logger aws.Logger
	// buf stores the contents of what has been read
	buf *bytes.Buffer
}
|
||||
|
||||
func (logger *logWriter) Write(b []byte) (int, error) {
|
||||
return logger.buf.Write(b)
|
||||
}
|
||||
|
||||
// teeReaderCloser adapts a tee reader into an io.ReadCloser so it can stand
// in for an http.Response.Body.
type teeReaderCloser struct {
	// io.Reader will be a tee reader that is used during logging.
	// This structure will read from a body and write the contents to a logger.
	io.Reader
	// Source is used just to close when we are done reading.
	Source io.ReadCloser
}
|
||||
|
||||
func (reader *teeReaderCloser) Close() error {
|
||||
return reader.Source.Close()
|
||||
}
|
||||
|
||||
// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
// to a service. Will include the HTTP request body if the LogLevel of the
// request matches LogDebugWithHTTPBody.
//
// Installed on the Send handler list by Client.AddDebugHandlers.
var LogHTTPRequestHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequest",
	Fn:   logRequest,
}
|
||||
|
||||
// logRequest dumps the signed HTTP request to the request's Logger. The body
// is included only when the LogLevel matches LogDebugWithHTTPBody. No-op when
// debug logging is disabled or no Logger is configured.
func logRequest(r *request.Request) {
	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
		return
	}

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	// Capture seekability before the dump, which may re-wrap the body.
	bodySeekable := aws.IsReaderSeekable(r.Body)

	b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

	if logBody {
		if !bodySeekable {
			r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
		}
		// Reset the request body because dumpRequest will re-wrap the
		// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
		// read by the HTTP client reader.
		// NOTE(review): SetReaderBody presumably performs that reset and
		// records any failure on r.Error, which is why r.Error is checked
		// here — confirm against request.Request's implementation.
		if err := r.Error; err != nil {
			r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
				r.ClientInfo.ServiceName, r.Operation.Name, err))
			return
		}
	}

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
|
||||
|
||||
// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
// to a service. Will only log the HTTP request's headers. The request payload
// will not be read.
var LogHTTPRequestHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequestHeader",
	Fn:   logRequestHeader,
}
|
||||
|
||||
func logRequestHeader(r *request.Request) {
|
||||
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
// logRespMsg is the format for a successfully dumped response.
// Verbs: service name, operation name, response dump.
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`

// logRespErrMsg is the format used when dumping the response fails.
// Verbs: service name, operation name, error.
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
---[ RESPONSE DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
|
||||
|
||||
// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
//
// Installed on the Send handler list by Client.AddDebugHandlers.
var LogHTTPResponseHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponse",
	Fn:   logResponse,
}
|
||||
|
||||
// logResponse arranges for the HTTP response to be logged. The headers are
// dumped immediately by a handler registered on the Unmarshal and
// UnmarshalError lists; when LogDebugWithHTTPBody is set, the body is teed
// into an in-memory buffer as it is read and logged by that same handler.
// No-op when debug logging is disabled or no Logger is configured.
func logResponse(r *request.Request) {
	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
		return
	}

	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}

	if r.HTTPResponse == nil {
		lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
		return
	}

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	if logBody {
		// Tee the body so whatever the unmarshalers read is also captured
		// in lw.buf for logging; Source keeps Close working.
		r.HTTPResponse.Body = &teeReaderCloser{
			Reader: io.TeeReader(r.HTTPResponse.Body, lw),
			Source: r.HTTPResponse.Body,
		}
	}

	// handlerFn runs after the body has been consumed by the unmarshal
	// phase; it logs the header dump and, if teed, the captured body.
	handlerFn := func(req *request.Request) {
		b, err := httputil.DumpResponse(req.HTTPResponse, false)
		if err != nil {
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
				req.ClientInfo.ServiceName, req.Operation.Name, err))
			return
		}

		lw.Logger.Log(fmt.Sprintf(logRespMsg,
			req.ClientInfo.ServiceName, req.Operation.Name, string(b)))

		if logBody {
			b, err := ioutil.ReadAll(lw.buf)
			if err != nil {
				lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
					req.ClientInfo.ServiceName, req.Operation.Name, err))
				return
			}

			lw.Logger.Log(string(b))
		}
	}

	// NOTE(review): "awsdk" (not "awssdk") looks like a typo, but the name
	// is a runtime identifier used by SetBackNamed de-duplication — confirm
	// before changing.
	const handlerName = "awsdk.client.LogResponse.ResponseBody"

	r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
		Name: handlerName, Fn: handlerFn,
	})
	r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
		Name: handlerName, Fn: handlerFn,
	})
}
|
||||
|
||||
// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponseHeader",
	Fn:   logResponseHeader,
}
|
||||
|
||||
func logResponseHeader(r *request.Request) {
|
||||
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := httputil.DumpResponse(r.HTTPResponse, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
@ -0,0 +1,15 @@
|
||||
package metadata
|
||||
|
||||
// ClientInfo wraps immutable data from the client.Client structure.
//
// These values describe the service a client talks to and how its requests
// are signed; they are set at client construction and passed unchanged into
// every request (see Client.NewRequest).
type ClientInfo struct {
	ServiceName    string
	ServiceID      string
	APIVersion     string
	PartitionID    string
	Endpoint       string
	SigningName    string
	SigningRegion  string
	JSONVersion    string
	TargetPrefix   string
	ResolvedRegion string
}
|
@ -0,0 +1,28 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}
|
||||
|
||||
// MaxRetries returns the number of maximum retries the service will use to make
// an individual API call; for NoOpRetryer the MaxRetries will always be zero.
func (d NoOpRetryer) MaxRetries() int {
	return 0
}
|
||||
|
||||
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
	return false
}
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
	return 0
}
|
@ -0,0 +1,631 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
)
|
||||
|
||||
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
const UseServiceDefaultRetries = -1
|
||||
|
||||
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
//
// It is declared as an empty interface here; consumers type-assert the
// stored value to request.Retryer (falling back to client.DefaultRetryer
// when the assertion fails).
type RequestRetryer interface{}
|
||||
|
||||
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
//	// Create Session with MaxRetries configuration to be shared by multiple
//	// service clients.
//	sess := session.Must(session.NewSession(&aws.Config{
//	    MaxRetries: aws.Int(3),
//	}))
//
//	// Create S3 service client with a specific Region.
//	svc := s3.New(sess, &aws.Config{
//	    Region: aws.String("us-west-2"),
//	})
type Config struct {
	// Enables verbose error printing of all credential chain errors.
	// Should be used when wanting to see all errors while attempting to
	// retrieve credentials.
	CredentialsChainVerboseErrors *bool

	// The credentials object to use when signing requests. Defaults to a
	// chain of credential providers to search for credentials in environment
	// variables, shared credential file, and EC2 Instance Roles.
	Credentials *credentials.Credentials

	// An optional endpoint URL (hostname only or fully qualified URI)
	// that overrides the default generated endpoint for a client. Set this
	// to `nil` or the value to `""` to use the default generated endpoint.
	//
	// Note: You must still provide a `Region` value when specifying an
	// endpoint for a client.
	Endpoint *string

	// The resolver to use for looking up endpoints for AWS service clients
	// to use based on region.
	EndpointResolver endpoints.Resolver

	// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
	// ShouldRetry regardless of whether or not if request.Retryable is set.
	// This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
	// is not set, then ShouldRetry will only be called if request.Retryable is nil.
	// Proper handling of the request.Retryable field is important when setting this field.
	EnforceShouldRetryCheck *bool

	// The region to send requests to. This parameter is required and must
	// be configured globally or on a per-client basis unless otherwise
	// noted. A full list of regions is found in the "Regions and Endpoints"
	// document.
	//
	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
	// Regions and Endpoints.
	Region *string

	// Set this to `true` to disable SSL when sending requests. Defaults
	// to `false`.
	DisableSSL *bool

	// The HTTP client to use when sending requests. Defaults to
	// `http.DefaultClient`.
	HTTPClient *http.Client

	// An integer value representing the logging level. The default log level
	// is zero (LogOff), which represents no logging. To enable logging set
	// to a LogLevel Value.
	LogLevel *LogLevelType

	// The logger writer interface to write logging messages to. Defaults to
	// standard out.
	Logger Logger

	// The maximum number of times that a request will be retried for failures.
	// Defaults to -1, which defers the max retry setting to the service
	// specific configuration.
	MaxRetries *int

	// Retryer guides how HTTP requests should be retried in case of
	// recoverable failures.
	//
	// When nil or the value does not implement the request.Retryer interface,
	// the client.DefaultRetryer will be used.
	//
	// When both Retryer and MaxRetries are non-nil, the former is used and
	// the latter ignored.
	//
	// To set the Retryer field in a type-safe manner and with chaining, use
	// the request.WithRetryer helper function:
	//
	//	cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
	Retryer RequestRetryer

	// Disables semantic parameter validation, which validates input for
	// missing required fields and/or other semantic request input errors.
	DisableParamValidation *bool

	// Disables the computation of request and response checksums, e.g.,
	// CRC32 checksums in Amazon DynamoDB.
	DisableComputeChecksums *bool

	// Set this to `true` to force the request to use path-style addressing,
	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
	// will use virtual hosted bucket addressing when possible
	// (`http://BUCKET.s3.amazonaws.com/KEY`).
	//
	// Note: This configuration option is specific to the Amazon S3 service.
	//
	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
	// for Amazon S3: Virtual Hosting of Buckets
	S3ForcePathStyle *bool

	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
	// header to PUT requests over 2MB of content. 100-Continue instructs the
	// HTTP client not to send the body until the service responds with a
	// `continue` status. This is useful to prevent sending the request body
	// until after the request is authenticated, and validated.
	//
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
	//
	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
	// `ExpectContinueTimeout` for information on adjusting the continue wait
	// timeout. https://golang.org/pkg/net/http/#Transport
	//
	// You should use this flag to disable 100-Continue if you experience issues
	// with proxies or third party S3 compatible services.
	S3Disable100Continue *bool

	// Set this to `true` to enable S3 Accelerate feature. For all operations
	// compatible with S3 Accelerate will use the accelerate endpoint for
	// requests. Requests not compatible will fall back to normal S3 requests.
	//
	// The bucket must be enable for accelerate to be used with S3 client with
	// accelerate enabled. If the bucket is not enabled for accelerate an error
	// will be returned. The bucket name must be DNS compatible to also work
	// with accelerate.
	S3UseAccelerate *bool

	// S3DisableContentMD5Validation config option is temporarily disabled,
	// For S3 GetObject API calls, #1837.
	//
	// Set this to `true` to disable the S3 service client from automatically
	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
	// will also disable the SDK from performing object ContentMD5 validation
	// on GetObject API calls.
	S3DisableContentMD5Validation *bool

	// Set this to `true` to have the S3 service client to use the region specified
	// in the ARN, when an ARN is provided as an argument to a bucket parameter.
	S3UseARNRegion *bool

	// Set this to `true` to enable the SDK to unmarshal API response header maps to
	// normalized lower case map keys.
	//
	// For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
	// Metadata member's map keys. The value of the header in the map is unaffected.
	//
	// The AWS SDK for Go v2, uses lower case header maps by default. The v1
	// SDK provides this opt-in for this option, for backwards compatibility.
	LowerCaseHeaderMaps *bool

	// Set this to `true` to disable the EC2Metadata client from overriding the
	// default http.Client's Timeout. This is helpful if you do not want the
	// EC2Metadata client to create a new http.Client. This options is only
	// meaningful if you're not already using a custom HTTP client with the
	// SDK. Enabled by default.
	//
	// Must be set and provided to the session.NewSession() in order to disable
	// the EC2Metadata overriding the timeout for default credentials chain.
	//
	// Example:
	//
	//	sess := session.Must(session.NewSession(aws.NewConfig()
	//	    .WithEC2MetadataDisableTimeoutOverride(true)))
	//
	//	svc := s3.New(sess)
	EC2MetadataDisableTimeoutOverride *bool

	// Instructs the endpoint to be generated for a service client to
	// be the dual stack endpoint. The dual stack endpoint will support
	// both IPv4 and IPv6 addressing.
	//
	// Setting this for a service which does not support dual stack will fail
	// to make requests. It is not recommended to set this value on the session
	// as it will apply to all service clients created with the session. Even
	// services which don't support dual stack endpoints.
	//
	// If the Endpoint config value is also provided the UseDualStack flag
	// will be ignored.
	//
	// Example:
	//
	//	sess := session.Must(session.NewSession())
	//
	//	svc := s3.New(sess, &aws.Config{
	//	    UseDualStack: aws.Bool(true),
	//	})
	//
	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
	// precedence then this option.
	UseDualStack *bool

	// Sets the resolver to resolve a dual-stack endpoint for the service.
	UseDualStackEndpoint endpoints.DualStackEndpointState

	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
	UseFIPSEndpoint endpoints.FIPSEndpointState

	// SleepDelay is an override for the func the SDK will call when sleeping
	// during the lifecycle of a request. Specifically this will be used for
	// request delays. This value should only be used for testing. To adjust
	// the delay of a request see the aws/client.DefaultRetryer and
	// aws/request.Retryer.
	//
	// SleepDelay will prevent any Context from being used for canceling retry
	// delay of an API operation. It is recommended to not use SleepDelay at all
	// and specify a Retryer instead.
	SleepDelay func(time.Duration)

	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
	// Will default to false. This would only be used for empty directory names in s3 requests.
	//
	// Example:
	//
	//	sess := session.Must(session.NewSession(&aws.Config{
	//	    DisableRestProtocolURICleaning: aws.Bool(true),
	//	}))
	//
	//	svc := s3.New(sess)
	//	out, err := svc.GetObject(&s3.GetObjectInput {
	//	    Bucket: aws.String("bucketname"),
	//	    Key: aws.String("//foo//bar//moo"),
	//	})
	DisableRestProtocolURICleaning *bool

	// EnableEndpointDiscovery will allow for endpoint discovery on operations that
	// have the definition in its model. By default, endpoint discovery is off.
	// To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
	//
	// Example:
	//
	//	sess := session.Must(session.NewSession(&aws.Config{
	//	    EnableEndpointDiscovery: aws.Bool(true),
	//	}))
	//
	//	svc := s3.New(sess)
	//	out, err := svc.GetObject(&s3.GetObjectInput {
	//	    Bucket: aws.String("bucketname"),
	//	    Key: aws.String("/foo/bar/moo"),
	//	})
	EnableEndpointDiscovery *bool

	// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
	// request endpoint hosts with modeled information.
	//
	// Disabling this feature is useful when you want to use local endpoints
	// for testing that do not support the modeled host prefix pattern.
	DisableEndpointHostPrefix *bool

	// STSRegionalEndpoint will enable regional or legacy endpoint resolving
	STSRegionalEndpoint endpoints.STSRegionalEndpoint

	// S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder
|
||||
// methods to set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// // Create Session with MaxRetries configuration to be shared by multiple
|
||||
// // service clients.
|
||||
// sess := session.Must(session.NewSession(aws.NewConfig().
|
||||
// WithMaxRetries(3),
|
||||
// ))
|
||||
//
|
||||
// // Create S3 service client with a specific Region.
|
||||
// svc := s3.New(sess, aws.NewConfig().
|
||||
// WithRegion("us-west-2"),
|
||||
// )
|
||||
func NewConfig() *Config {
|
||||
return &Config{}
|
||||
}
|
||||
|
||||
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
||||
// a Config pointer.
|
||||
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
||||
c.CredentialsChainVerboseErrors = &verboseErrs
|
||||
return c
|
||||
}
|
||||
|
||||
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
	c.Credentials = creds
	return c
}
|
||||
|
||||
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||
c.Endpoint = &endpoint
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpointResolver sets a config EndpointResolver value returning a
// Config pointer for chaining.
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
	c.EndpointResolver = resolver
	return c
}
|
||||
|
||||
// WithRegion sets a config Region value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithRegion(region string) *Config {
|
||||
c.Region = ®ion
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||
c.DisableSSL = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
	c.HTTPClient = client
	return c
}
|
||||
|
||||
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithMaxRetries(max int) *Config {
|
||||
c.MaxRetries = &max
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||
c.DisableParamValidation = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||
c.DisableComputeChecksums = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||
c.LogLevel = &level
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
	c.Logger = logger
	return c
}
|
||||
|
||||
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||
c.S3ForcePathStyle = &force
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
||||
// a Config pointer for chaining.
|
||||
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
||||
c.S3Disable100Continue = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||
c.S3UseAccelerate = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithS3DisableContentMD5Validation sets a config
|
||||
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
|
||||
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
|
||||
c.S3DisableContentMD5Validation = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithS3UseARNRegion sets a config S3UseARNRegion value and
|
||||
// returning a Config pointer for chaining
|
||||
func (c *Config) WithS3UseARNRegion(enable bool) *Config {
|
||||
c.S3UseARNRegion = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithUseDualStack(enable bool) *Config {
|
||||
c.UseDualStack = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||
c.EC2MetadataDisableTimeoutOverride = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithSleepDelay overrides the function used to sleep while waiting for the
|
||||
// next retry. Defaults to time.Sleep.
|
||||
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
||||
c.SleepDelay = fn
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
|
||||
func (c *Config) WithEndpointDiscovery(t bool) *Config {
|
||||
c.EnableEndpointDiscovery = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
|
||||
// when making requests.
|
||||
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
|
||||
c.DisableEndpointHostPrefix = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
|
||||
// when resolving the endpoint for a service
|
||||
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
|
||||
c.STSRegionalEndpoint = sre
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag
|
||||
// when resolving the endpoint for a service
|
||||
func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
|
||||
c.S3UsEast1RegionalEndpoint = sre
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
|
||||
c.LowerCaseHeaderMaps = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
|
||||
c.DisableRestProtocolURICleaning = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeIn merges the passed in configs into the existing config object.
|
||||
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||
for _, other := range cfgs {
|
||||
mergeInConfig(c, other)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeInConfig(dst *Config, other *Config) {
|
||||
if other == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if other.CredentialsChainVerboseErrors != nil {
|
||||
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
||||
}
|
||||
|
||||
if other.Credentials != nil {
|
||||
dst.Credentials = other.Credentials
|
||||
}
|
||||
|
||||
if other.Endpoint != nil {
|
||||
dst.Endpoint = other.Endpoint
|
||||
}
|
||||
|
||||
if other.EndpointResolver != nil {
|
||||
dst.EndpointResolver = other.EndpointResolver
|
||||
}
|
||||
|
||||
if other.Region != nil {
|
||||
dst.Region = other.Region
|
||||
}
|
||||
|
||||
if other.DisableSSL != nil {
|
||||
dst.DisableSSL = other.DisableSSL
|
||||
}
|
||||
|
||||
if other.HTTPClient != nil {
|
||||
dst.HTTPClient = other.HTTPClient
|
||||
}
|
||||
|
||||
if other.LogLevel != nil {
|
||||
dst.LogLevel = other.LogLevel
|
||||
}
|
||||
|
||||
if other.Logger != nil {
|
||||
dst.Logger = other.Logger
|
||||
}
|
||||
|
||||
if other.MaxRetries != nil {
|
||||
dst.MaxRetries = other.MaxRetries
|
||||
}
|
||||
|
||||
if other.Retryer != nil {
|
||||
dst.Retryer = other.Retryer
|
||||
}
|
||||
|
||||
if other.DisableParamValidation != nil {
|
||||
dst.DisableParamValidation = other.DisableParamValidation
|
||||
}
|
||||
|
||||
if other.DisableComputeChecksums != nil {
|
||||
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||
}
|
||||
|
||||
if other.S3ForcePathStyle != nil {
|
||||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||
}
|
||||
|
||||
if other.S3Disable100Continue != nil {
|
||||
dst.S3Disable100Continue = other.S3Disable100Continue
|
||||
}
|
||||
|
||||
if other.S3UseAccelerate != nil {
|
||||
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||
}
|
||||
|
||||
if other.S3DisableContentMD5Validation != nil {
|
||||
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
|
||||
}
|
||||
|
||||
if other.S3UseARNRegion != nil {
|
||||
dst.S3UseARNRegion = other.S3UseARNRegion
|
||||
}
|
||||
|
||||
if other.UseDualStack != nil {
|
||||
dst.UseDualStack = other.UseDualStack
|
||||
}
|
||||
|
||||
if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
|
||||
dst.UseDualStackEndpoint = other.UseDualStackEndpoint
|
||||
}
|
||||
|
||||
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||
}
|
||||
|
||||
if other.SleepDelay != nil {
|
||||
dst.SleepDelay = other.SleepDelay
|
||||
}
|
||||
|
||||
if other.DisableRestProtocolURICleaning != nil {
|
||||
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
|
||||
}
|
||||
|
||||
if other.EnforceShouldRetryCheck != nil {
|
||||
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
|
||||
}
|
||||
|
||||
if other.EnableEndpointDiscovery != nil {
|
||||
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
|
||||
}
|
||||
|
||||
if other.DisableEndpointHostPrefix != nil {
|
||||
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
|
||||
}
|
||||
|
||||
if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
|
||||
dst.STSRegionalEndpoint = other.STSRegionalEndpoint
|
||||
}
|
||||
|
||||
if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
|
||||
dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
|
||||
}
|
||||
|
||||
if other.LowerCaseHeaderMaps != nil {
|
||||
dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
|
||||
}
|
||||
|
||||
if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
|
||||
dst.UseDualStackEndpoint = other.UseDualStackEndpoint
|
||||
}
|
||||
|
||||
if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset {
|
||||
dst.UseFIPSEndpoint = other.UseFIPSEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object. If any additional
|
||||
// configurations are provided they will be merged into the new config returned.
|
||||
func (c *Config) Copy(cfgs ...*Config) *Config {
|
||||
dst := &Config{}
|
||||
dst.MergeIn(c)
|
||||
|
||||
for _, cfg := range cfgs {
|
||||
dst.MergeIn(cfg)
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
@ -0,0 +1,38 @@
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// Context is a copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as an SDK interface so the "WithContext" API methods
// can be used with Go v1.6 and a Context type such as
// golang.org/x/net/context.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	Value(key interface{}) interface{}
}
|
@ -0,0 +1,12 @@
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package aws
|
||||
|
||||
import "context"
|
||||
|
||||
// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
|
@ -0,0 +1,23 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/internal/context"
|
||||
)
|
||||
|
||||
// BackgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func BackgroundContext() Context {
|
||||
return context.BackgroundCtx
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package aws
|
||||
|
||||
import "context"
|
||||
|
||||
// BackgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func BackgroundContext() Context {
|
||||
return context.Background()
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SleepWithContext will wait for the timer duration to expire, or the context
|
||||
// is canceled. Which ever happens first. If the context is canceled the Context's
|
||||
// error will be returned.
|
||||
//
|
||||
// Expects Context to always return a non-nil error if the Done channel is closed.
|
||||
func SleepWithContext(ctx Context, dur time.Duration) error {
|
||||
t := time.NewTimer(dur)
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case <-t.C:
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,918 @@
|
||||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// String returns a pointer to the string value passed in.
func String(v string) *string {
	return &v
}

// StringValue returns the value of the string pointer passed in, or ""
// if the pointer is nil.
func StringValue(v *string) string {
	if v == nil {
		return ""
	}
	return *v
}

// StringSlice converts a slice of string values into a slice of
// string pointers.
func StringSlice(src []string) []*string {
	dst := make([]*string, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// StringValueSlice converts a slice of string pointers into a slice of
// string values; nil pointers become "".
func StringValueSlice(src []*string) []string {
	dst := make([]string, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// StringMap converts a string map of string values into a string
// map of string pointers.
func StringMap(src map[string]string) map[string]*string {
	dst := make(map[string]*string, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// StringValueMap converts a string map of string pointers into a string
// map of string values; entries with nil pointers are omitted.
func StringValueMap(src map[string]*string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
	return &v
}

// BoolValue returns the value of the bool pointer passed in, or false
// if the pointer is nil.
func BoolValue(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}

// BoolSlice converts a slice of bool values into a slice of
// bool pointers.
func BoolSlice(src []bool) []*bool {
	dst := make([]*bool, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values; nil pointers become false.
func BoolValueSlice(src []*bool) []bool {
	dst := make([]bool, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// BoolMap converts a string map of bool values into a string
// map of bool pointers.
func BoolMap(src map[string]bool) map[string]*bool {
	dst := make(map[string]*bool, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// BoolValueMap converts a string map of bool pointers into a string
// map of bool values; entries with nil pointers are omitted.
func BoolValueMap(src map[string]*bool) map[string]bool {
	dst := make(map[string]bool, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
	return &v
}

// IntValue returns the value of the int pointer passed in, or 0
// if the pointer is nil.
func IntValue(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// IntSlice converts a slice of int values into a slice of
// int pointers.
func IntSlice(src []int) []*int {
	dst := make([]*int, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// IntValueSlice converts a slice of int pointers into a slice of
// int values; nil pointers become 0.
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string
// map of int pointers.
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string
// map of int values; entries with nil pointers are omitted.
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
	return &v
}

// UintValue returns the value of the uint pointer passed in, or 0
// if the pointer is nil.
func UintValue(v *uint) uint {
	if v == nil {
		return 0
	}
	return *v
}

// UintSlice converts a slice of uint values into a slice of
// uint pointers.
func UintSlice(src []uint) []*uint {
	dst := make([]*uint, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// UintValueSlice converts a slice of uint pointers into a slice of
// uint values; nil pointers become 0.
func UintValueSlice(src []*uint) []uint {
	dst := make([]uint, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// UintMap converts a string map of uint values into a string
// map of uint pointers.
func UintMap(src map[string]uint) map[string]*uint {
	dst := make(map[string]*uint, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// UintValueMap converts a string map of uint pointers into a string
// map of uint values; entries with nil pointers are omitted.
func UintValueMap(src map[string]*uint) map[string]uint {
	dst := make(map[string]uint, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Int8 returns a pointer to the int8 value passed in.
func Int8(v int8) *int8 {
	return &v
}

// Int8Value returns the value of the int8 pointer passed in, or 0
// if the pointer is nil.
func Int8Value(v *int8) int8 {
	if v == nil {
		return 0
	}
	return *v
}

// Int8Slice converts a slice of int8 values into a slice of
// int8 pointers.
func Int8Slice(src []int8) []*int8 {
	dst := make([]*int8, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int8ValueSlice converts a slice of int8 pointers into a slice of
// int8 values; nil pointers become 0.
func Int8ValueSlice(src []*int8) []int8 {
	dst := make([]int8, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int8Map converts a string map of int8 values into a string
// map of int8 pointers.
func Int8Map(src map[string]int8) map[string]*int8 {
	dst := make(map[string]*int8, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Int8ValueMap converts a string map of int8 pointers into a string
// map of int8 values; entries with nil pointers are omitted.
func Int8ValueMap(src map[string]*int8) map[string]int8 {
	dst := make(map[string]int8, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int16 returns a pointer to the int16 value passed in.
func Int16(v int16) *int16 {
	return &v
}

// Int16Value returns the value of the int16 pointer passed in, or 0
// if the pointer is nil.
func Int16Value(v *int16) int16 {
	if v == nil {
		return 0
	}
	return *v
}

// Int16Slice converts a slice of int16 values into a slice of
// int16 pointers.
func Int16Slice(src []int16) []*int16 {
	dst := make([]*int16, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int16ValueSlice converts a slice of int16 pointers into a slice of
// int16 values; nil pointers become 0.
func Int16ValueSlice(src []*int16) []int16 {
	dst := make([]int16, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int16Map converts a string map of int16 values into a string
// map of int16 pointers.
func Int16Map(src map[string]int16) map[string]*int16 {
	dst := make(map[string]*int16, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Int16ValueMap converts a string map of int16 pointers into a string
// map of int16 values; entries with nil pointers are omitted.
func Int16ValueMap(src map[string]*int16) map[string]int16 {
	dst := make(map[string]int16, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
	return &v
}

// Int32Value returns the value of the int32 pointer passed in, or 0
// if the pointer is nil.
func Int32Value(v *int32) int32 {
	if v == nil {
		return 0
	}
	return *v
}

// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers.
func Int32Slice(src []int32) []*int32 {
	dst := make([]*int32, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values; nil pointers become 0.
func Int32ValueSlice(src []*int32) []int32 {
	dst := make([]int32, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int32Map converts a string map of int32 values into a string
// map of int32 pointers.
func Int32Map(src map[string]int32) map[string]*int32 {
	dst := make(map[string]*int32, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values; entries with nil pointers are omitted.
func Int32ValueMap(src map[string]*int32) map[string]int32 {
	dst := make(map[string]int32, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
	return &v
}

// Int64Value returns the value of the int64 pointer passed in, or 0
// if the pointer is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers.
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values; nil pointers become 0.
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string
// map of int64 pointers.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values; entries with nil pointers are omitted.
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Uint8 returns a pointer to the uint8 value passed in.
func Uint8(v uint8) *uint8 {
	return &v
}

// Uint8Value returns the value of the uint8 pointer passed in, or 0
// if the pointer is nil.
func Uint8Value(v *uint8) uint8 {
	if v == nil {
		return 0
	}
	return *v
}

// Uint8Slice converts a slice of uint8 values into a slice of
// uint8 pointers.
func Uint8Slice(src []uint8) []*uint8 {
	dst := make([]*uint8, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
// uint8 values; nil pointers become 0.
func Uint8ValueSlice(src []*uint8) []uint8 {
	dst := make([]uint8, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Uint8Map converts a string map of uint8 values into a string
// map of uint8 pointers.
func Uint8Map(src map[string]uint8) map[string]*uint8 {
	dst := make(map[string]*uint8, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Uint8ValueMap converts a string map of uint8 pointers into a string
// map of uint8 values; entries with nil pointers are omitted.
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
	dst := make(map[string]uint8, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Uint16 returns a pointer to the uint16 value passed in.
func Uint16(v uint16) *uint16 {
	return &v
}

// Uint16Value returns the value of the uint16 pointer passed in, or 0
// if the pointer is nil.
func Uint16Value(v *uint16) uint16 {
	if v == nil {
		return 0
	}
	return *v
}

// Uint16Slice converts a slice of uint16 values into a slice of
// uint16 pointers.
func Uint16Slice(src []uint16) []*uint16 {
	dst := make([]*uint16, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
// uint16 values; nil pointers become 0.
func Uint16ValueSlice(src []*uint16) []uint16 {
	dst := make([]uint16, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Uint16Map converts a string map of uint16 values into a string
// map of uint16 pointers.
func Uint16Map(src map[string]uint16) map[string]*uint16 {
	dst := make(map[string]*uint16, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Uint16ValueMap converts a string map of uint16 pointers into a string
// map of uint16 values; entries with nil pointers are omitted.
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
	dst := make(map[string]uint16, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
	return &v
}

// Uint32Value returns the value of the uint32 pointer passed in, or 0
// if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
	if v == nil {
		return 0
	}
	return *v
}

// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers.
func Uint32Slice(src []uint32) []*uint32 {
	dst := make([]*uint32, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values; nil pointers become 0.
func Uint32ValueSlice(src []*uint32) []uint32 {
	dst := make([]uint32, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers.
func Uint32Map(src map[string]uint32) map[string]*uint32 {
	dst := make(map[string]*uint32, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values; entries with nil pointers are omitted.
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
	dst := make(map[string]uint32, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
	return &v
}

// Uint64Value returns the value of the uint64 pointer passed in, or 0
// if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
	if v == nil {
		return 0
	}
	return *v
}

// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers.
func Uint64Slice(src []uint64) []*uint64 {
	dst := make([]*uint64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values; nil pointers become 0.
func Uint64ValueSlice(src []*uint64) []uint64 {
	dst := make([]uint64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers.
func Uint64Map(src map[string]uint64) map[string]*uint64 {
	dst := make(map[string]*uint64, len(src))
	for k := range src {
		v := src[k]
		dst[k] = &v
	}
	return dst
}

// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values; entries with nil pointers are omitted.
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
	dst := make(map[string]uint64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Float32 returns a pointer to the float32 value passed in.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32Value returns the value of the float32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float32Value(v *float32) float32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float32Slice converts a slice of float32 values into a slice of
|
||||
// float32 pointers
|
||||
func Float32Slice(src []float32) []*float32 {
|
||||
dst := make([]*float32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values. Nil entries become the zero value.
func Float32ValueSlice(src []*float32) []float32 {
	dst := make([]float32, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers. Each pointer refers to a private copy of the value.
func Float32Map(src map[string]float32) map[string]*float32 {
	dst := make(map[string]*float32, len(src))
	for k, v := range src {
		v := v // copy so the pointer does not alias the loop variable
		dst[k] = &v
	}
	return dst
}
|
||||
|
||||
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values. Nil-valued keys are omitted from the result.
func Float32ValueMap(src map[string]*float32) map[string]float32 {
	dst := make(map[string]float32, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}
|
||||
|
||||
// Float64Value returns the value of the float64 pointer passed in, or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
	if v == nil {
		return 0
	}
	return *v
}
|
||||
|
||||
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers. Each pointer aliases the corresponding element of src.
func Float64Slice(src []float64) []*float64 {
	dst := make([]*float64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}
|
||||
|
||||
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values. Nil entries become the zero value.
func Float64ValueSlice(src []*float64) []float64 {
	dst := make([]float64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers. Each pointer refers to a private copy of the value.
func Float64Map(src map[string]float64) map[string]*float64 {
	dst := make(map[string]*float64, len(src))
	for k, v := range src {
		v := v // copy so the pointer does not alias the loop variable
		dst[k] = &v
	}
	return dst
}
|
||||
|
||||
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values. Nil-valued keys are omitted from the result.
func Float64ValueMap(src map[string]*float64) map[string]float64 {
	dst := make(map[string]float64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}
|
||||
|
||||
// Time returns a pointer to the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
||||
// TimeValue returns the value of the time.Time pointer passed in or
|
||||
// time.Time{} if the pointer is nil.
|
||||
func TimeValue(v *time.Time) time.Time {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// SecondsTimeValue converts an int64 pointer to a time.Time value
// representing seconds since Epoch or time.Time{} if the pointer is nil.
//
// NOTE(review): the implementation divides *v by 1000 before passing it to
// time.Unix, so it actually treats the input as milliseconds truncated to
// whole seconds — this disagrees with the doc comment above. Kept as-is
// because existing callers may depend on this behavior; confirm intent
// before changing.
func SecondsTimeValue(v *int64) time.Time {
	if v != nil {
		return time.Unix((*v / 1000), 0)
	}
	return time.Time{}
}
|
||||
|
||||
// MillisecondsTimeValue converts an int64 pointer holding milliseconds
// since Epoch to a time.Time value, or time.Time{} if the pointer is nil.
func MillisecondsTimeValue(v *int64) time.Time {
	if v == nil {
		return time.Time{}
	}
	// 1 ms = 1e6 ns; int64(time.Millisecond) == 1000000.
	return time.Unix(0, *v*int64(time.Millisecond))
}
|
||||
|
||||
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||
// Which includes calling TimeUnixMilli on a zero Time is undefined.
|
||||
//
|
||||
// This utility is useful for service API's such as CloudWatch Logs which require
|
||||
// their unix time values to be in milliseconds.
|
||||
//
|
||||
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
|
||||
func TimeUnixMilli(t time.Time) int64 {
|
||||
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
dst := make([]*time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||
// time.Time values
|
||||
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||
dst := make([]time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeMap converts a string map of time.Time values into a string
|
||||
// map of time.Time pointers
|
||||
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||
dst := make(map[string]*time.Time)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||
// map of time.Time values
|
||||
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||
dst := make(map[string]time.Time)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
@ -0,0 +1,232 @@
|
||||
package corehandlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// lener is satisfied by body types that can report their byte length
// without being consumed (e.g. bytes.Reader, strings.Reader).
type lener interface {
	Len() int
}
|
||||
|
||||
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
//
// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
	var length int64

	// An explicit Content-Length header always wins over the body's
	// seeker-derived length. A parse failure leaves length at 0, which
	// strips the header below.
	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
		length, _ = strconv.ParseInt(slength, 10, 64)
	} else {
		if r.Body != nil {
			var err error
			// SeekerLen measures the body without consuming it.
			length, err = aws.SeekerLen(r.Body)
			if err != nil {
				r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
				return
			}
		}
	}

	if length > 0 {
		r.HTTPRequest.ContentLength = length
		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
	} else {
		// Zero or negative length: clear both the field and the header so
		// the transport does not send an invalid Content-Length.
		r.HTTPRequest.ContentLength = 0
		r.HTTPRequest.Header.Del("Content-Length")
	}
}}
|
||||
|
||||
// reStatusCode matches a leading 3-digit HTTP status code embedded in an
// error string (see handleSendError).
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
||||
|
||||
// ValidateReqSigHandler is a request handler to ensure that the request's
|
||||
// signature doesn't expire before it is sent. This can happen when a request
|
||||
// is built and signed significantly before it is sent. Or significant delays
|
||||
// occur when retrying requests that would cause the signature to expire.
|
||||
var ValidateReqSigHandler = request.NamedHandler{
|
||||
Name: "core.ValidateReqSigHandler",
|
||||
Fn: func(r *request.Request) {
|
||||
// Unsigned requests are not signed
|
||||
if r.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
signedTime := r.Time
|
||||
if !r.LastSignedAt.IsZero() {
|
||||
signedTime = r.LastSignedAt
|
||||
}
|
||||
|
||||
// 5 minutes to allow for some clock skew/delays in transmission.
|
||||
// Would be improved with aws/aws-sdk-go#423
|
||||
if signedTime.Add(5 * time.Minute).After(time.Now()) {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("request expired, resigning")
|
||||
r.Sign()
|
||||
},
|
||||
}
|
||||
|
||||
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{
	Name: "core.SendHandler",
	Fn: func(r *request.Request) {
		// Choose the sending strategy based on the request's redirect policy.
		sender := sendFollowRedirects
		if r.DisableFollowRedirects {
			sender = sendWithoutFollowRedirects
		}

		if request.NoBody == r.HTTPRequest.Body {
			// Strip off the request body if the NoBody reader was used as a
			// place holder for a request body. This prevents the SDK from
			// making requests with a request body when it would be invalid
			// to do so.
			//
			// Use a shallow copy of the http.Request to ensure the race condition
			// of transport on Body will not trigger
			reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
			reqCopy.Body = nil
			r.HTTPRequest = &reqCopy
			defer func() {
				// Restore the original request so retries and later
				// handlers observe the unmodified value.
				r.HTTPRequest = reqOrig
			}()
		}

		var err error
		r.HTTPResponse, err = sender(r)
		if err != nil {
			// Normalize transport failures: synthesize a response,
			// record r.Error, and honor context cancellation.
			handleSendError(r, err)
		}
	},
}
|
||||
|
||||
func sendFollowRedirects(r *request.Request) (*http.Response, error) {
|
||||
return r.Config.HTTPClient.Do(r.HTTPRequest)
|
||||
}
|
||||
|
||||
func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
|
||||
transport := r.Config.HTTPClient.Transport
|
||||
if transport == nil {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
return transport.RoundTrip(r.HTTPRequest)
|
||||
}
|
||||
|
||||
// handleSendError normalizes a transport-level send failure on r: it
// guarantees r.HTTPResponse is non-nil, records the failure on r.Error,
// and marks the request non-retryable when r's context was canceled.
func handleSendError(r *request.Request, err error) {
	// Prevent leaking if an HTTPResponse was returned. Clean up
	// the body.
	if r.HTTPResponse != nil {
		r.HTTPResponse.Body.Close()
	}
	// Capture the case where url.Error is returned for error processing
	// response. e.g. 301 without location header comes back as string
	// error and r.HTTPResponse is nil. Other URL redirect errors will
	// comeback in a similar method.
	if e, ok := err.(*url.Error); ok && e.Err != nil {
		if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
			code, _ := strconv.ParseInt(s[1], 10, 64)
			// Synthesize a bodyless response carrying the parsed status
			// code so downstream handlers can process it normally.
			r.HTTPResponse = &http.Response{
				StatusCode: int(code),
				Status:     http.StatusText(int(code)),
				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
			}
			return
		}
	}
	if r.HTTPResponse == nil {
		// Add a dummy request response object to ensure the HTTPResponse
		// value is consistent. (StatusCode 0 is later treated as an error
		// by ValidateResponseHandler.)
		r.HTTPResponse = &http.Response{
			StatusCode: int(0),
			Status:     http.StatusText(int(0)),
			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
		}
	}
	// Catch all request errors, and let the default retrier determine
	// if the error is retryable.
	r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err)

	// Override the error with a context canceled error, if that was canceled.
	ctx := r.Context()
	select {
	case <-ctx.Done():
		r.Error = awserr.New(request.CanceledErrorCode,
			"request context canceled", ctx.Err())
		r.Retryable = aws.Bool(false)
	default:
	}
}
|
||||
|
||||
// ValidateResponseHandler is a request handler to validate service response.
// StatusCode 0 is the dummy response synthesized by handleSendError;
// 3xx and above are treated as failures here.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
		// this may be replaced by an UnmarshalError handler
		r.Error = awserr.New("UnknownError", "unknown error", r.Error)
	}
}}
|
||||
|
||||
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{
	Name: "core.AfterRetryHandler",
	Fn: func(r *request.Request) {
		// If one of the other handlers already set the retry state
		// we don't want to override it based on the service's state
		if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
			r.Retryable = aws.Bool(r.ShouldRetry(r))
		}

		if r.WillRetry() {
			r.RetryDelay = r.RetryRules(r)

			if sleepFn := r.Config.SleepDelay; sleepFn != nil {
				// Support SleepDelay for backwards compatibility and testing
				sleepFn(r.RetryDelay)
			} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
				// Context canceled while waiting: abort the retry entirely.
				r.Error = awserr.New(request.CanceledErrorCode,
					"request context canceled", err)
				r.Retryable = aws.Bool(false)
				return
			}

			// when the expired token exception occurs the credentials
			// need to be expired locally so that the next request to
			// get credentials will trigger a credentials refresh.
			if r.IsErrorExpired() {
				r.Config.Credentials.Expire()
			}

			// Clear the previous attempt's error so the retry starts clean.
			r.RetryCount++
			r.Error = nil
		}
	}}
|
||||
|
||||
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
	// A region is required unless the service supplies its own signing
	// region via ClientInfo.
	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
		r.Error = aws.ErrMissingRegion
	} else if r.ClientInfo.Endpoint == "" {
		// Was any endpoint provided by the user, or one was derived by the
		// SDK's endpoint resolver?
		r.Error = aws.ErrMissingEndpoint
	}
}}
|
@ -0,0 +1,17 @@
|
||||
package corehandlers
|
||||
|
||||
import "github.com/aws/aws-sdk-go/aws/request"
|
||||
|
||||
// ValidateParametersHandler is a request handler to validate the input parameters.
|
||||
// Validating parameters only has meaning if done prior to the request being sent.
|
||||
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
|
||||
if !r.ParamsFilled() {
|
||||
return
|
||||
}
|
||||
|
||||
if v, ok := r.Params.(request.Validator); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
r.Error = err
|
||||
}
|
||||
}
|
||||
}}
|
@ -0,0 +1,37 @@
|
||||
package corehandlers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
// to the user agent. The value also carries the Go runtime version, OS, and
// architecture, captured once at package initialization.
var SDKVersionUserAgentHandler = request.NamedHandler{
	Name: "core.SDKVersionUserAgentHandler",
	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
		runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
|
||||
|
||||
// execEnvVar is the environment variable read to detect the hosting
// execution environment.
const execEnvVar = `AWS_EXECUTION_ENV`

// execEnvUAKey is the user-agent key the environment value is reported under.
const execEnvUAKey = `exec-env`
|
||||
|
||||
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
//
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
// appended to the user agent string.
//
// NOTE(review): the exported name misspells "Handler" as "Hander"; it is
// part of the public API, so renaming would be a breaking change.
var AddHostExecEnvUserAgentHander = request.NamedHandler{
	Name: "core.AddHostExecEnvUserAgentHander",
	Fn: func(r *request.Request) {
		v := os.Getenv(execEnvVar)
		if len(v) == 0 {
			// Nothing to report when the variable is unset or empty.
			return
		}

		request.AddToUserAgent(r, execEnvUAKey+"/"+v)
	},
}
|
@ -0,0 +1,100 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
	// ErrNoValidProvidersFoundInChain Is returned when there are no valid
	// providers in the ChainProvider.
	//
	// Deprecated: for verbose error messaging set
	// aws.Config.CredentialsChainVerboseErrors to true.
	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
		`no valid providers in chain. Deprecated.
	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
		nil)
)
|
||||
|
||||
// A ChainProvider will search for a provider which returns credentials
|
||||
// and cache that provider until Retrieve is called again.
|
||||
//
|
||||
// The ChainProvider provides a way of chaining multiple providers together
|
||||
// which will pick the first available using priority order of the Providers
|
||||
// in the list.
|
||||
//
|
||||
// If none of the Providers retrieve valid credentials Value, ChainProvider's
|
||||
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
|
||||
//
|
||||
// If a Provider is found which returns valid credentials Value ChainProvider
|
||||
// will cache that Provider for all calls to IsExpired(), until Retrieve is
|
||||
// called again.
|
||||
//
|
||||
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
|
||||
// In this example EnvProvider will first check if any credentials are available
|
||||
// via the environment variables. If there are none ChainProvider will check
|
||||
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
|
||||
// does not return any credentials ChainProvider will return the error
|
||||
// ErrNoValidProvidersFoundInChain
|
||||
//
|
||||
// creds := credentials.NewChainCredentials(
|
||||
// []credentials.Provider{
|
||||
// &credentials.EnvProvider{},
|
||||
// &ec2rolecreds.EC2RoleProvider{
|
||||
// Client: ec2metadata.New(sess),
|
||||
// },
|
||||
// })
|
||||
//
|
||||
// // Usage of ChainCredentials with aws.Config
|
||||
// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
|
||||
// Credentials: creds,
|
||||
// })))
|
||||
//
|
||||
type ChainProvider struct {
	// Providers is the ordered list of providers searched on each Retrieve.
	Providers []Provider
	// curr caches the provider that most recently returned credentials;
	// IsExpired delegates to it.
	curr Provider
	// VerboseErrors makes Retrieve fail with a batch error containing
	// every provider's individual error instead of the generic sentinel.
	VerboseErrors bool
}
|
||||
|
||||
// NewChainCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a chain of providers.
|
||||
func NewChainCredentials(providers []Provider) *Credentials {
|
||||
return NewCredentials(&ChainProvider{
|
||||
Providers: append([]Provider{}, providers...),
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value or error if no provider returned
|
||||
// without error.
|
||||
//
|
||||
// If a provider is found it will be cached and any calls to IsExpired()
|
||||
// will return the expired state of the cached provider.
|
||||
func (c *ChainProvider) Retrieve() (Value, error) {
|
||||
var errs []error
|
||||
for _, p := range c.Providers {
|
||||
creds, err := p.Retrieve()
|
||||
if err == nil {
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
c.curr = nil
|
||||
|
||||
var err error
|
||||
err = ErrNoValidProvidersFoundInChain
|
||||
if c.VerboseErrors {
|
||||
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
|
||||
}
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
// IsExpired will returned the expired state of the currently cached provider
|
||||
// if there is one. If there is no current provider, true will be returned.
|
||||
func (c *ChainProvider) IsExpired() bool {
|
||||
if c.curr != nil {
|
||||
return c.curr.IsExpired()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
@ -0,0 +1,23 @@
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/internal/context"
|
||||
)
|
||||
|
||||
// backgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func backgroundContext() Context {
	// Pre-Go 1.7 builds delegate to the SDK's internal background
	// context value.
	return context.BackgroundCtx
}
|
@ -0,0 +1,21 @@
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package credentials
|
||||
|
||||
import "context"
|
||||
|
||||
// backgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func backgroundContext() Context {
	// Go 1.7+ builds use the stdlib context package directly.
	return context.Background()
}
|
@ -0,0 +1,40 @@
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package credentials
|
||||
|
||||
import "time"
|
||||
|
||||
// Context is a copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as a SDK interface to enable you to use the "WithContext"
// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
//
// This type, aws.Context, and context.Context are equivalent.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	Value(key interface{}) interface{}
}
|
@ -0,0 +1,14 @@
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package credentials
|
||||
|
||||
import "context"
|
||||
|
||||
// Context is an alias of the Go stdlib's context.Context interface.
|
||||
// It can be used within the SDK's API operation "WithContext" methods.
|
||||
//
|
||||
// This type, aws.Context, and context.Context are equivalent.
|
||||
//
|
||||
// See https://golang.org/pkg/context on how to use contexts.
|
||||
type Context = context.Context // type alias, not a new type: values convert freely
|
@ -0,0 +1,383 @@
|
||||
// Package credentials provides credential retrieval and management
|
||||
//
|
||||
// The Credentials is the primary method of getting access to and managing
|
||||
// credentials Values. Using dependency injection retrieval of the credential
|
||||
// values is handled by a object which satisfies the Provider interface.
|
||||
//
|
||||
// By default the Credentials.Get() will cache the successful result of a
|
||||
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||
//
|
||||
// The Provider is responsible for determining when credentials Value have expired.
|
||||
// It is also important to note that Credentials will always call Retrieve the
|
||||
// first time Credentials.Get() is called.
|
||||
//
|
||||
// Example of using the environment variable credentials.
|
||||
//
|
||||
// creds := credentials.NewEnvCredentials()
|
||||
//
|
||||
// // Retrieve the credentials value
|
||||
// credValue, err := creds.Get()
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||
// than they would naturally expire on their own.
|
||||
//
|
||||
// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
|
||||
// creds.Expire()
|
||||
// credsValue, err := creds.Get()
|
||||
// // New credentials will be retrieved instead of from cache.
|
||||
//
|
||||
//
|
||||
// Custom Provider
|
||||
//
|
||||
// Each Provider built into this package also provides a helper method to generate
|
||||
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||
// create a type which satisfies the Provider interface and pass it to the
|
||||
// NewCredentials method.
|
||||
//
|
||||
// type MyProvider struct{}
|
||||
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||
// func (m *MyProvider) IsExpired() bool {...}
|
||||
//
|
||||
// creds := credentials.NewCredentials(&MyProvider{})
|
||||
// credValue, err := creds.Get()
|
||||
//
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/internal/sync/singleflight"
|
||||
)
|
||||
|
||||
// AnonymousCredentials is an empty Credential object that can be used as
|
||||
// dummy placeholder credentials for requests that do not need signed.
|
||||
//
|
||||
// This Credentials can be used to configure a service to not sign requests
|
||||
// when making service API calls. For example, when accessing public
|
||||
// s3 buckets.
|
||||
//
|
||||
// svc := s3.New(session.Must(session.NewSession(&aws.Config{
|
||||
// Credentials: credentials.AnonymousCredentials,
|
||||
// })))
|
||||
// // Access public S3 buckets.
|
||||
var AnonymousCredentials = NewStaticCredentials("", "", "") // empty static creds; compared by pointer identity (e.g. corehandlers.ValidateReqSigHandler)
|
||||
|
||||
// A Value is the AWS credentials value for individual credential fields.
|
||||
type Value struct {
	// AWS Access key ID
	AccessKeyID string

	// AWS Secret Access Key
	SecretAccessKey string

	// AWS Session Token
	SessionToken string

	// Name of the Provider that produced this Value; useful for
	// identifying which provider in a chain supplied the credentials.
	ProviderName string
}
|
||||
|
||||
// HasKeys returns if the credentials Value has both AccessKeyID and
|
||||
// SecretAccessKey value set.
|
||||
func (v Value) HasKeys() bool {
|
||||
return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
|
||||
}
|
||||
|
||||
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
	// Retrieve returns nil if it successfully retrieved the value.
	// Error is returned if the value were not obtainable, or empty.
	Retrieve() (Value, error)

	// IsExpired returns if the credentials are no longer valid, and need
	// to be retrieved again.
	IsExpired() bool
}
|
||||
|
||||
// ProviderWithContext is a Provider that can retrieve credentials with a Context
type ProviderWithContext interface {
	Provider

	// RetrieveWithContext behaves like Provider.Retrieve while honoring
	// the supplied Context.
	RetrieveWithContext(Context) (Value, error)
}
|
||||
|
||||
// An Expirer is an interface that Providers can implement to expose the
// expiration time, if known. If the Provider cannot accurately provide this
// info, it should not implement this interface.
type Expirer interface {
	// ExpiresAt returns the time at which the credentials are no longer valid.
	ExpiresAt() time.Time
}
|
||||
|
||||
// An ErrorProvider is a stub credentials provider that always returns an
// error. This is used by the SDK when construction of a known provider is
// not possible due to an error.
type ErrorProvider struct {
	// The error to be returned from Retrieve
	Err error

	// The provider name to set on the Retrieved returned Value
	ProviderName string
}
|
||||
|
||||
// Retrieve will always return the error that the ErrorProvider was created with.
|
||||
func (p ErrorProvider) Retrieve() (Value, error) {
|
||||
return Value{ProviderName: p.ProviderName}, p.Err
|
||||
}
|
||||
|
||||
// IsExpired will always return not expired; an ErrorProvider's state never
// changes, so re-retrieval would only reproduce the same error.
func (p ErrorProvider) IsExpired() bool {
	return false
}
|
||||
|
||||
// A Expiry provides shared expiration logic to be used by credentials
|
||||
// providers to implement expiry functionality.
|
||||
//
|
||||
// The best method to use this struct is as an anonymous field within the
|
||||
// provider's struct.
|
||||
//
|
||||
// Example:
|
||||
// type EC2RoleProvider struct {
|
||||
// Expiry
|
||||
// ...
|
||||
// }
|
||||
type Expiry struct {
	// The date/time when to expire on, as stored by SetExpiration (already
	// reduced by any expiry window).
	expiration time.Time

	// If set will be used by IsExpired to determine the current time.
	// Defaults to time.Now if CurrentTime is not set. Available for testing
	// to be able to mock out the current time.
	CurrentTime func() time.Time
}
|
||||
|
||||
// SetExpiration sets the expiration IsExpired will check when called.
|
||||
//
|
||||
// If window is greater than 0 the expiration time will be reduced by the
|
||||
// window value.
|
||||
//
|
||||
// Using a window is helpful to trigger credentials to expire sooner than
|
||||
// the expiration time given to ensure no requests are made with expired
|
||||
// tokens.
|
||||
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||
// Passed in expirations should have the monotonic clock values stripped.
|
||||
// This ensures time comparisons will be based on wall-time.
|
||||
e.expiration = expiration.Round(0)
|
||||
if window > 0 {
|
||||
e.expiration = e.expiration.Add(-window)
|
||||
}
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
func (e *Expiry) IsExpired() bool {
|
||||
curTime := e.CurrentTime
|
||||
if curTime == nil {
|
||||
curTime = time.Now
|
||||
}
|
||||
return e.expiration.Before(curTime())
|
||||
}
|
||||
|
||||
// ExpiresAt returns the expiration time of the credential, as recorded by
// SetExpiration (already reduced by any expiry window).
func (e *Expiry) ExpiresAt() time.Time {
	return e.expiration
}
|
||||
|
||||
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
	// sf collapses concurrent refresh attempts into a single call to the
	// provider's Retrieve.
	sf singleflight.Group

	// m guards creds. provider is set at construction and read-only after.
	m sync.RWMutex
	creds Value
	provider Provider
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials with the provider set.
|
||||
func NewCredentials(provider Provider) *Credentials {
|
||||
c := &Credentials{
|
||||
provider: provider,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// GetWithContext returns the credentials value, or error if the credentials
// Value failed to be retrieved. Will return early if the passed in context is
// canceled.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
//
// Passed in Context is equivalent to aws.Context, and context.Context.
func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
	// Check if credentials are cached, and not expired. The check runs in
	// a goroutine (asyncIsExpired) so it can race against ctx cancellation.
	select {
	case curCreds, ok := <-c.asyncIsExpired():
		// ok will only be true if the credentials were not expired. ok will
		// be false and have no value if the credentials are expired.
		if ok {
			return curCreds, nil
		}
	case <-ctx.Done():
		return Value{}, awserr.New("RequestCanceled",
			"request context canceled", ctx.Err())
	}

	// Cannot pass context down to the actual retrieve, because the first
	// context would cancel the whole group when there is not direct
	// association of items in the group. suppressedContext keeps ctx's
	// values but hides its cancellation from the shared retrieve.
	resCh := c.sf.DoChan("", func() (interface{}, error) {
		return c.singleRetrieve(&suppressedContext{ctx})
	})
	select {
	case res := <-resCh:
		return res.Val.(Value), res.Err
	case <-ctx.Done():
		// The shared retrieve keeps running for other waiters; only this
		// caller gives up.
		return Value{}, awserr.New("RequestCanceled",
			"request context canceled", ctx.Err())
	}
}
|
||||
|
||||
// singleRetrieve refreshes the cached credentials under the write lock.
// It is invoked through the singleflight group so at most one refresh runs
// at a time; waiters that queued behind a completed refresh reuse its result.
func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
	c.m.Lock()
	defer c.m.Unlock()

	// Another goroutine may have refreshed the credentials while this one
	// was waiting on the lock; reuse them if still valid.
	if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
		return curCreds, nil
	}

	var creds Value
	var err error
	// Prefer the context-aware retrieval when the provider supports it.
	if p, ok := c.provider.(ProviderWithContext); ok {
		creds, err = p.RetrieveWithContext(ctx)
	} else {
		creds, err = c.provider.Retrieve()
	}
	// Only cache on success; a failed refresh leaves the old (expired)
	// value in place so the next Get retries.
	if err == nil {
		c.creds = creds
	}

	return creds, err
}
|
||||
|
||||
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
//
// Equivalent to GetWithContext with a background (non-cancelable) context.
func (c *Credentials) Get() (Value, error) {
	return c.GetWithContext(backgroundContext())
}
|
||||
|
||||
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
	c.m.Lock()
	defer c.m.Unlock()

	// A zero Value is treated as "expired" by isExpiredLocked.
	c.creds = Value{}
}
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
	c.m.RLock()
	defer c.m.RUnlock()

	return c.isExpiredLocked(c.creds)
}
|
||||
|
||||
// asyncIsExpired returns a channel of credentials Value. If the channel is
// closed without delivering a value the credentials are expired; otherwise
// the single buffered value is the still-valid cached credentials.
//
// The check runs in its own goroutine so the caller can select against it
// alongside context cancellation (see GetWithContext).
func (c *Credentials) asyncIsExpired() <-chan Value {
	// Buffer of 1 lets the goroutine finish even if the caller has
	// already abandoned the select on ctx.Done().
	ch := make(chan Value, 1)
	go func() {
		c.m.RLock()
		defer c.m.RUnlock()

		if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
			ch <- curCreds
		}

		close(ch)
	}()

	return ch
}
|
||||
|
||||
// isExpiredLocked is the single definition of "expired" shared by the
// exported checks. The caller must hold c.m (read or write).
//
// Credentials count as expired when no Value has been cached yet (nil or
// zero Value, including after Expire()), or when the provider reports expiry.
func (c *Credentials) isExpiredLocked(creds interface{}) bool {
	return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
}
|
||||
|
||||
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
	c.m.RLock()
	defer c.m.RUnlock()

	expirer, ok := c.provider.(Expirer)
	if !ok {
		return time.Time{}, awserr.New("ProviderNotExpirer",
			fmt.Sprintf("provider %s does not support ExpiresAt()",
				c.creds.ProviderName),
			nil)
	}
	if c.creds == (Value{}) {
		// Credentials were force-expired (or never retrieved): report the
		// zero time — the distant past — so callers treat them as expired.
		return time.Time{}, nil
	}
	return expirer.ExpiresAt(), nil
}
|
||||
|
||||
// suppressedContext wraps a Context but hides its cancellation signals:
// Deadline, Done, and Err are overridden to report "never cancels" while
// context values still pass through the embedded Context. Used so one
// caller's cancellation cannot abort a retrieve shared with other callers.
type suppressedContext struct {
	Context
}
|
||||
|
||||
// Deadline reports no deadline, suppressing the wrapped context's deadline.
func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
	return time.Time{}, false
}
|
||||
|
||||
// Done returns nil so the wrapped context is never considered canceled
// (receiving from a nil channel blocks forever).
func (s *suppressedContext) Done() <-chan struct{} {
	return nil
}
|
||||
|
||||
// Err always returns nil, consistent with the suppressed Done channel.
func (s *suppressedContext) Err() error {
	return nil
}
|
188
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
188
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
@ -0,0 +1,188 @@
|
||||
package ec2rolecreds
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// ProviderName is the name label applied to credentials Values sourced by
// the EC2Role provider.
const ProviderName = "EC2RoleProvider"
|
||||
|
||||
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
// or ExpiryWindow
//
//     p := &ec2rolecreds.EC2RoleProvider{
//         // Pass in a custom timeout to be used when requesting
//         // IAM EC2 Role credentials.
//         Client: ec2metadata.New(sess, aws.Config{
//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
//         }),
//
//         // Do not use early expiry of credentials. If a non zero value is
//         // specified the credentials will be expired early
//         ExpiryWindow: 0,
//     }
type EC2RoleProvider struct {
	// Embedded Expiry supplies IsExpired/SetExpiration bookkeeping.
	credentials.Expiry

	// Required EC2Metadata client to use when connecting to EC2 metadata service.
	Client *ec2metadata.EC2Metadata

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping
|
||||
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
|
||||
// The ConfigProvider is satisfied by the session.Session type.
|
||||
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||
p := &EC2RoleProvider{
|
||||
Client: ec2metadata.New(c),
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
|
||||
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
|
||||
// metadata service.
|
||||
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||
p := &EC2RoleProvider{
|
||||
Client: client,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
//
// Delegates to RetrieveWithContext with a background context.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
	return m.RetrieveWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// RetrieveWithContext retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	credsList, err := requestCredList(ctx, m.Client)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	if len(credsList) == 0 {
		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
	}
	// Only the first role listed by the metadata service is used.
	credsName := credsList[0]

	roleCreds, err := requestCred(ctx, m.Client, credsName)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// Record expiry (minus ExpiryWindow) so IsExpired triggers a refresh.
	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken:    roleCreds.Token,
		ProviderName:    ProviderName,
	}, nil
}
|
||||
|
||||
// An ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses. A single shape covers both outcomes: the success
// fields are populated on success, Code/Message on failure.
type ec2RoleCredRespBody struct {
	// Success State
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string

	// Error state
	Code    string
	Message string
}
|
||||
|
||||
// iamSecurityCredsPath is the instance metadata path that lists the IAM
// role names attached to the instance (one per line).
const iamSecurityCredsPath = "iam/security-credentials/"
|
||||
|
||||
// requestCredList requests the list of IAM role names attached to the EC2
// instance from the instance metadata service.
//
// Returns an error if the request fails or the response cannot be read;
// otherwise it returns one role name per line of the response body.
func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
	resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
	if err != nil {
		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
	}

	credsList := []string{}
	// The metadata service returns one role name per line.
	s := bufio.NewScanner(strings.NewReader(resp))
	for s.Scan() {
		credsList = append(credsList, s.Text())
	}

	if err := s.Err(); err != nil {
		return nil, awserr.New(request.ErrCodeSerialization,
			"failed to read EC2 instance role from metadata service", err)
	}

	return credsList, nil
}
|
||||
|
||||
// requestCred requests the credentials for a specific role name from the
// EC2 metadata service.
//
// If the credentials cannot be found, or there is an error reading the
// response, an error will be returned.
func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
	resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
	if err != nil {
		return ec2RoleCredRespBody{},
			awserr.New("EC2RoleRequestError",
				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
				err)
	}

	respCreds := ec2RoleCredRespBody{}
	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{},
			awserr.New(request.ErrCodeSerialization,
				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
				err)
	}

	// The service reports errors inside a 200 body via Code/Message.
	if respCreds.Code != "Success" {
		// If an error code was returned something failed requesting the role.
		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
	}

	return respCreds, nil
}
|
@ -0,0 +1,210 @@
|
||||
// Package endpointcreds provides support for retrieving credentials from an
|
||||
// arbitrary HTTP endpoint.
|
||||
//
|
||||
// The credentials endpoint Provider can receive both static and refreshable
|
||||
// credentials that will expire. Credentials are static when an "Expiration"
|
||||
// value is not provided in the endpoint's response.
|
||||
//
|
||||
// Static credentials will never expire once they have been retrieved. The format
|
||||
// of the static credentials response:
|
||||
// {
|
||||
// "AccessKeyId" : "MUA...",
|
||||
// "SecretAccessKey" : "/7PC5om....",
|
||||
// }
|
||||
//
|
||||
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
|
||||
// value in the response. The format of the refreshable credentials response:
|
||||
// {
|
||||
// "AccessKeyId" : "MUA...",
|
||||
// "SecretAccessKey" : "/7PC5om....",
|
||||
// "Token" : "AQoDY....=",
|
||||
// "Expiration" : "2016-02-25T06:03:31Z"
|
||||
// }
|
||||
//
|
||||
// Errors should be returned in the following format and only returned with 400
|
||||
// or 500 HTTP status codes.
|
||||
// {
|
||||
// "code": "ErrorCode",
|
||||
// "message": "Helpful error message."
|
||||
// }
|
||||
package endpointcreds
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
|
||||
)
|
||||
|
||||
// ProviderName is the name of the credentials provider, applied to every
// Value this provider returns.
const ProviderName = `CredentialsEndpointProvider`
|
||||
|
||||
// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
	// staticCreds is set when the endpoint response carried no Expiration,
	// in which case IsExpired always reports false.
	staticCreds bool
	// Embedded Expiry supplies expiry bookkeeping for refreshable credentials.
	credentials.Expiry

	// Requires a AWS Client to make HTTP requests to the endpoint with.
	// the Endpoint the request will be made to is provided by the aws.Config's
	// Endpoint value.
	Client *client.Client

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration

	// Optional authorization token value if set will be used as the value of
	// the Authorization header of the endpoint credential request.
	AuthorizationToken string
}
|
||||
|
||||
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
	p := &Provider{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: "CredentialsEndpoint",
				Endpoint:    endpoint,
			},
			handlers,
		),
	}

	// Wire up JSON (de)serialization and replace the default validation
	// with an endpoint-presence check.
	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
	p.Client.Handlers.Validate.Clear()
	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)

	// Options run last so callers can override any default configured above.
	for _, option := range options {
		option(p)
	}

	return p
}
|
||||
|
||||
// NewCredentialsClient returns a pointer to a new Credentials object
|
||||
// wrapping the endpoint credentials Provider.
|
||||
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
|
||||
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
|
||||
}
|
||||
|
||||
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||
// retrieved.
|
||||
func (p *Provider) IsExpired() bool {
|
||||
if p.staticCreds {
|
||||
return false
|
||||
}
|
||||
return p.Expiry.IsExpired()
|
||||
}
|
||||
|
||||
// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. And error will be returned if the retrieval fails.
//
// Delegates to RetrieveWithContext with a background context.
func (p *Provider) Retrieve() (credentials.Value, error) {
	return p.RetrieveWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
// was configured for. And error will be returned if the retrieval fails.
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	resp, err := p.getCredentials(ctx)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName},
			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
	}

	// A response without an Expiration marks the credentials static:
	// they are cached forever (see IsExpired).
	if resp.Expiration != nil {
		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
	} else {
		p.staticCreds = true
	}

	return credentials.Value{
		AccessKeyID:     resp.AccessKeyID,
		SecretAccessKey: resp.SecretAccessKey,
		SessionToken:    resp.Token,
		ProviderName:    ProviderName,
	}, nil
}
|
||||
|
||||
// getCredentialsOutput is the shape of a successful endpoint response.
// Expiration is optional; when nil the credentials are treated as static.
type getCredentialsOutput struct {
	Expiration      *time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string
}
|
||||
|
||||
// errorOutput is the shape of an error response from the endpoint
// (expected only with 400 or 500 HTTP status codes).
type errorOutput struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
|
||||
|
||||
// getCredentials issues a GET request against the configured endpoint and
// returns the decoded credential payload. The optional AuthorizationToken
// is sent as the Authorization header when set.
func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
	op := &request.Operation{
		Name:       "GetCredentials",
		HTTPMethod: "GET",
	}

	out := &getCredentialsOutput{}
	req := p.Client.NewRequest(op, nil, out)
	req.SetContext(ctx)
	req.HTTPRequest.Header.Set("Accept", "application/json")
	if authToken := p.AuthorizationToken; len(authToken) != 0 {
		req.HTTPRequest.Header.Set("Authorization", authToken)
	}

	return out, req.Send()
}
|
||||
|
||||
// validateEndpointHandler fails the request early when the client was
// constructed without an endpoint.
func validateEndpointHandler(r *request.Request) {
	if len(r.ClientInfo.Endpoint) == 0 {
		r.Error = aws.ErrMissingEndpoint
	}
}
|
||||
|
||||
func unmarshalHandler(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
out := r.Data.(*getCredentialsOutput)
|
||||
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode endpoint credentials",
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// unmarshalError decodes an error response body into r.Error. If the body
// itself cannot be parsed, a serialization RequestFailure carrying the
// HTTP status code is reported instead.
func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	var errOut errorOutput
	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization,
				"failed to decode error message", err),
			r.HTTPResponse.StatusCode,
			r.RequestID,
		)
		return
	}

	// Response body format is not consistent between metadata endpoints.
	// Grab the error message as a string and include that as the source error
	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}
|
@ -0,0 +1,74 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// EnvProviderName is the name label applied to credentials Values sourced
// by the Env provider.
const EnvProviderName = "EnvProvider"
|
||||
|
||||
var (
	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
	// found in the process's environment.
	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)

	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
	// can't be found in the process's environment.
	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
|
||||
|
||||
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
//
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
	// retrieved records whether Retrieve has succeeded; IsExpired reports
	// its negation.
	retrieved bool
}
|
||||
|
||||
// NewEnvCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the environment variable provider.
|
||||
func NewEnvCredentials() *Credentials {
|
||||
return NewCredentials(&EnvProvider{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvProvider) Retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
if id == "" {
|
||||
id = os.Getenv("AWS_ACCESS_KEY")
|
||||
}
|
||||
|
||||
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
if secret == "" {
|
||||
secret = os.Getenv("AWS_SECRET_KEY")
|
||||
}
|
||||
|
||||
if id == "" {
|
||||
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
|
||||
}
|
||||
|
||||
if secret == "" {
|
||||
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
|
||||
}
|
||||
|
||||
e.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
ProviderName: EnvProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
// Environment credentials never expire once successfully read.
func (e *EnvProvider) IsExpired() bool {
	return !e.retrieved
}
|
@ -0,0 +1,12 @@
|
||||
[default]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
aws_session_token = token
|
||||
|
||||
[no_token]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
|
||||
[with_colon]
|
||||
aws_access_key_id: accessKey
|
||||
aws_secret_access_key: secret
|
@ -0,0 +1,426 @@
|
||||
/*
|
||||
Package processcreds is a credential Provider to retrieve `credential_process`
|
||||
credentials.
|
||||
|
||||
WARNING: The following describes a method of sourcing credentials from an external
|
||||
process. This can potentially be dangerous, so proceed with caution. Other
|
||||
credential providers should be preferred if at all possible. If using this
|
||||
option, you should make sure that the config file is as locked down as possible
|
||||
using security best practices for your operating system.
|
||||
|
||||
You can use credentials from a `credential_process` in a variety of ways.
|
||||
|
||||
One way is to setup your shared config file, located in the default
|
||||
location, with the `credential_process` key and the command you want to be
|
||||
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
|
||||
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
|
||||
|
||||
[default]
|
||||
credential_process = /command/to/call
|
||||
|
||||
Creating a new session will use the credential process to retrieve credentials.
|
||||
NOTE: If there are credentials in the profile you are using, the credential
|
||||
process will not be used.
|
||||
|
||||
// Initialize a session to load credentials.
|
||||
sess, _ := session.NewSession(&aws.Config{
|
||||
Region: aws.String("us-east-1")},
|
||||
)
|
||||
|
||||
// Create S3 service client to use the credentials.
|
||||
svc := s3.New(sess)
|
||||
|
||||
Another way to use the `credential_process` method is by using
|
||||
`credentials.NewCredentials()` and providing a command to be executed to
|
||||
retrieve credentials:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentials("/path/to/command")
|
||||
|
||||
// Create service client value configured for credentials.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
You can set a non-default timeout for the `credential_process` with another
|
||||
constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
|
||||
set a 500 millisecond timeout:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentialsTimeout(
|
||||
"/path/to/command",
|
||||
time.Duration(500) * time.Millisecond)
|
||||
|
||||
If you need more control, you can set any configurable options in the
|
||||
credentials using one or more option functions. For example, you can set a two
|
||||
minute timeout, a credential duration of 60 minutes, and a maximum stdout
|
||||
buffer size of 2k.
|
||||
|
||||
creds := processcreds.NewCredentials(
|
||||
"/path/to/command",
|
||||
func(opt *ProcessProvider) {
|
||||
opt.Timeout = time.Duration(2) * time.Minute
|
||||
opt.Duration = time.Duration(60) * time.Minute
|
||||
opt.MaxBufSize = 2048
|
||||
})
|
||||
|
||||
You can also use your own `exec.Cmd`:
|
||||
|
||||
// Create an exec.Cmd
|
||||
myCommand := exec.Command("/path/to/command")
|
||||
|
||||
// Create credentials using your exec.Cmd and custom timeout
|
||||
creds := processcreds.NewCredentialsCommand(
|
||||
myCommand,
|
||||
func(opt *processcreds.ProcessProvider) {
|
||||
opt.Timeout = time.Duration(1) * time.Second
|
||||
})
|
||||
*/
|
||||
package processcreds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
const (
	// ProviderName is the name this credentials provider will label any
	// returned credentials Value with.
	ProviderName = `ProcessProvider`

	// ErrCodeProcessProviderParse error parsing process output
	ErrCodeProcessProviderParse = "ProcessProviderParseError"

	// ErrCodeProcessProviderVersion version error in output
	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"

	// ErrCodeProcessProviderRequired required attribute missing in output
	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"

	// ErrCodeProcessProviderExecution execution of command failed
	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"

	// errMsgProcessProviderTimeout process took longer than allowed
	errMsgProcessProviderTimeout = "credential process timed out"

	// errMsgProcessProviderProcess process error
	errMsgProcessProviderProcess = "error in credential_process"

	// errMsgProcessProviderParse problem parsing output
	errMsgProcessProviderParse = "parse failed of credential_process output"

	// errMsgProcessProviderVersion version error in output
	errMsgProcessProviderVersion = "wrong version in process output (not 1)"

	// errMsgProcessProviderMissKey missing access key id in output
	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"

	// errMsgProcessProviderMissSecret missing secret access key in output
	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"

	// errMsgProcessProviderPrepareCmd prepare of command failed
	errMsgProcessProviderPrepareCmd = "failed to prepare command"

	// errMsgProcessProviderEmptyCmd command must not be empty
	errMsgProcessProviderEmptyCmd = "command must not be empty"

	// errMsgProcessProviderPipe failed to initialize pipe
	errMsgProcessProviderPipe = "failed to initialize pipe"

	// DefaultDuration is the default amount of time in minutes that the
	// credentials will be valid for.
	DefaultDuration = time.Duration(15) * time.Minute

	// DefaultBufSize limits buffer size from growing to an enormous
	// amount due to a faulty process.
	DefaultBufSize = int(8 * sdkio.KibiByte)

	// DefaultTimeout default limit on time a process can run.
	DefaultTimeout = time.Duration(1) * time.Minute
)
|
||||
|
||||
// ProcessProvider satisfies the credentials.Provider interface, and is a
// client to retrieve credentials from a process.
type ProcessProvider struct {
	// staticCreds is set when the process output carried no Expiration.
	staticCreds bool
	// Embedded Expiry supplies expiry bookkeeping for refreshable credentials.
	credentials.Expiry
	// originalCommand preserves the command line for re-running the process.
	originalCommand []string

	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
	Duration time.Duration

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration

	// The OS command to execute that should print a JSON document with
	// credential information to stdout.
	command *exec.Cmd

	// MaxBufSize limits memory usage from growing to an enormous
	// amount due to a faulty process.
	MaxBufSize int

	// Timeout limits the time a process can run.
	Timeout time.Duration
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// ProcessProvider. The credentials will expire every 15 minutes by default.
|
||||
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: exec.Command(command),
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsTimeout returns a pointer to a new Credentials object with
|
||||
// the specified command and timeout, and default duration and max buffer size.
|
||||
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
|
||||
p := NewCredentials(command, func(opt *ProcessProvider) {
|
||||
opt.Timeout = timeout
|
||||
})
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// NewCredentialsCommand returns a pointer to a new Credentials object with
|
||||
// the specified command, and default timeout, duration and max buffer size.
|
||||
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: command,
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// credentialProcessResponse is the JSON document the external credential
// process must print on stdout. Field names match the document keys, except
// AccessKeyID which is tagged to map from "AccessKeyId".
type credentialProcessResponse struct {
	// Version of the response schema; Retrieve only accepts version 1.
	Version int
	// AccessKeyID is the AWS access key ID ("AccessKeyId" in the JSON).
	AccessKeyID string `json:"AccessKeyId"`
	// SecretAccessKey is the AWS secret key; required by Retrieve.
	SecretAccessKey string
	// SessionToken is the optional STS session token.
	SessionToken string
	// Expiration, when present, is when the credentials expire; when nil
	// the credentials are treated as static (never expiring).
	Expiration *time.Time
}
|
||||
|
||||
// Retrieve executes the 'credential_process' and returns the credentials.
//
// The process's stdout is parsed as a credentialProcessResponse JSON
// document, which must be schema version 1 and must carry both an access
// key ID and a secret access key. A missing Expiration marks the
// credentials as static so IsExpired never reports them expired.
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
	out, err := p.executeCredentialProcess()
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// Serialize and validate response
	resp := &credentialProcessResponse{}
	if err = json.Unmarshal(out, resp); err != nil {
		// Include the raw output in the message to aid debugging a
		// malformed credential process.
		return credentials.Value{ProviderName: ProviderName}, awserr.New(
			ErrCodeProcessProviderParse,
			fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
			err)
	}

	// Only schema version 1 is understood.
	if resp.Version != 1 {
		return credentials.Value{ProviderName: ProviderName}, awserr.New(
			ErrCodeProcessProviderVersion,
			errMsgProcessProviderVersion,
			nil)
	}

	if len(resp.AccessKeyID) == 0 {
		return credentials.Value{ProviderName: ProviderName}, awserr.New(
			ErrCodeProcessProviderRequired,
			errMsgProcessProviderMissKey,
			nil)
	}

	if len(resp.SecretAccessKey) == 0 {
		return credentials.Value{ProviderName: ProviderName}, awserr.New(
			ErrCodeProcessProviderRequired,
			errMsgProcessProviderMissSecret,
			nil)
	}

	// Handle expiration: no Expiration means static, never-expiring
	// credentials; otherwise arm the embedded Expiry.
	p.staticCreds = resp.Expiration == nil
	if resp.Expiration != nil {
		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
	}

	return credentials.Value{
		ProviderName:    ProviderName,
		AccessKeyID:     resp.AccessKeyID,
		SecretAccessKey: resp.SecretAccessKey,
		SessionToken:    resp.SessionToken,
	}, nil
}
|
||||
|
||||
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||
// retrieved.
|
||||
func (p *ProcessProvider) IsExpired() bool {
|
||||
if p.staticCreds {
|
||||
return false
|
||||
}
|
||||
return p.Expiry.IsExpired()
|
||||
}
|
||||
|
||||
// prepareCommand prepares the command to be executed.
//
// The caller-supplied command line is wrapped in the platform shell
// ("cmd.exe /C" on Windows, "sh -c" elsewhere) and rebuilt into a fresh
// exec.Cmd carrying the current environment. The original arguments are
// captured once into p.originalCommand so repeated refreshes rebuild from
// the same command line.
func (p *ProcessProvider) prepareCommand() error {

	// Pick the shell wrapper for the current platform.
	var cmdArgs []string
	if runtime.GOOS == "windows" {
		cmdArgs = []string{"cmd.exe", "/C"}
	} else {
		cmdArgs = []string{"sh", "-c"}
	}

	// First call: snapshot the user's command and reject an empty one.
	if len(p.originalCommand) == 0 {
		p.originalCommand = make([]string, len(p.command.Args))
		copy(p.originalCommand, p.command.Args)

		// check for empty command because it succeeds
		if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
			return awserr.New(
				ErrCodeProcessProviderExecution,
				fmt.Sprintf(
					"%s: %s",
					errMsgProcessProviderPrepareCmd,
					errMsgProcessProviderEmptyCmd),
				nil)
		}
	}

	// Rebuild the command each time so a previously-run exec.Cmd is
	// never reused.
	cmdArgs = append(cmdArgs, p.originalCommand...)
	p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
	p.command.Env = os.Environ()

	return nil
}
|
||||
|
||||
// executeCredentialProcess starts the credential process on the OS and
// returns the results or an error.
//
// The child's stdout is captured through an os.Pipe into an in-memory
// buffer (capped at p.MaxBufSize via io.LimitReader), while stderr and
// stdin stay wired to the console so MFA prompts still work. Two goroutines
// run concurrently — one draining the pipe, one running the process — and
// the select loop below bounds the whole run by p.Timeout.
//
// NOTE(review): on timeout the child process is not killed, and
// outReadPipe is never explicitly closed — confirm whether relying on
// process exit / GC finalizers for cleanup is intended here.
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {

	if err := p.prepareCommand(); err != nil {
		return nil, err
	}

	// Setup the pipes
	outReadPipe, outWritePipe, err := os.Pipe()
	if err != nil {
		return nil, awserr.New(
			ErrCodeProcessProviderExecution,
			errMsgProcessProviderPipe,
			err)
	}

	p.command.Stderr = os.Stderr    // display stderr on console for MFA
	p.command.Stdout = outWritePipe // get creds json on process's stdout
	p.command.Stdin = os.Stdin      // enable stdin for MFA

	output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))

	// Drain the read end of the pipe into output; finishes once the
	// write end is closed (after the process exits).
	stdoutCh := make(chan error, 1)
	go readInput(
		io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
		output,
		stdoutCh)

	execCh := make(chan error, 1)
	go executeCommand(*p.command, execCh)

	finished := false
	var errors []error
	for !finished {
		select {
		case readError := <-stdoutCh:
			// Reader finished — either the pipe was closed after the
			// process exited, or the read itself failed.
			errors = appendError(errors, readError)
			finished = true
		case execError := <-execCh:
			// Process exited; close the write end so the reader
			// goroutine sees EOF and reports on stdoutCh.
			err := outWritePipe.Close()
			errors = appendError(errors, err)
			errors = appendError(errors, execError)
			if errors != nil {
				return output.Bytes(), awserr.NewBatchError(
					ErrCodeProcessProviderExecution,
					errMsgProcessProviderProcess,
					errors)
			}
		case <-time.After(p.Timeout):
			finished = true
			return output.Bytes(), awserr.NewBatchError(
				ErrCodeProcessProviderExecution,
				errMsgProcessProviderTimeout,
				errors) // errors can be nil
		}
	}

	out := output.Bytes()

	if runtime.GOOS == "windows" {
		// windows adds slashes to quotes
		out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
	}

	return out, nil
}
|
||||
|
||||
// appendError appends err to errs, skipping the append entirely when err
// is nil so the slice only ever holds real errors.
func appendError(errs []error, e error) []error {
	if e == nil {
		return errs
	}
	return append(errs, e)
}
|
||||
|
||||
// executeCommand runs cmd to completion and delivers the resulting error
// (nil on success) on the done channel.
func executeCommand(cmd exec.Cmd, done chan error) {
	// Start the command; only Wait if the start itself succeeded.
	if err := cmd.Start(); err != nil {
		done <- err
		return
	}
	done <- cmd.Wait()
}
|
||||
|
||||
// readInput copies everything from r into w and then reports the copy
// error (nil on a clean EOF) on the read channel.
func readInput(r io.Reader, w io.Writer, read chan error) {
	// io.Copy streams r into w exactly as the previous
	// TeeReader+ReadAll pair did, without buffering a second copy.
	_, err := io.Copy(w, r)
	if err == io.EOF { // defensive: Copy reports clean EOF as nil already
		err = nil
	}
	read <- err // will only arrive here when write end of pipe is closed
}
|
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
@ -0,0 +1,151 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/internal/ini"
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// SharedCredsProviderName provides a name of SharedCreds provider
|
||||
const SharedCredsProviderName = "SharedCredentialsProvider"
|
||||
|
||||
var (
|
||||
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
|
||||
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
|
||||
)
|
||||
|
||||
// A SharedCredentialsProvider retrieves access key pair (access key ID,
// secret access key, and session token if present) credentials from the current
// user's home directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.aws/credentials"
	// Windows:   "%USERPROFILE%\.aws\credentials"
	//
	// The resolved path is written back to this field by filename(), so
	// later retrievals reuse it.
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	//
	// The resolved profile is written back to this field by profile().
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
|
||||
|
||||
// NewSharedCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the Profile file provider.
|
||||
func NewSharedCredentials(filename, profile string) *Credentials {
|
||||
return NewCredentials(&SharedCredentialsProvider{
|
||||
Filename: filename,
|
||||
Profile: profile,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
|
||||
p.retrieved = false
|
||||
|
||||
filename, err := p.filename()
|
||||
if err != nil {
|
||||
return Value{ProviderName: SharedCredsProviderName}, err
|
||||
}
|
||||
|
||||
creds, err := loadProfile(filename, p.profile())
|
||||
if err != nil {
|
||||
return Value{ProviderName: SharedCredsProviderName}, err
|
||||
}
|
||||
|
||||
p.retrieved = true
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired. File-based
// credentials never expire once read; this only reports true before the
// first successful Retrieve.
func (p *SharedCredentialsProvider) IsExpired() bool {
	return !p.retrieved
}
|
||||
|
||||
// loadProfile loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
//
// aws_access_key_id and aws_secret_access_key are required;
// aws_session_token is optional and defaults to the empty string.
func loadProfile(filename, profile string) (Value, error) {
	config, err := ini.OpenFile(filename)
	if err != nil {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
	}

	iniProfile, ok := config.GetSection(profile)
	if !ok {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
	}

	id := iniProfile.String("aws_access_key_id")
	if len(id) == 0 {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
			nil)
	}

	secret := iniProfile.String("aws_secret_access_key")
	if len(secret) == 0 {
		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
			nil)
	}

	// Default to empty string if not found
	token := iniProfile.String("aws_session_token")

	return Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    token,
		ProviderName:    SharedCredsProviderName,
	}, nil
}
|
||||
|
||||
// filename returns the filename to use to read AWS shared credentials.
|
||||
//
|
||||
// Will return an error if the user's home directory path cannot be found.
|
||||
func (p *SharedCredentialsProvider) filename() (string, error) {
|
||||
if len(p.Filename) != 0 {
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
if home := shareddefaults.UserHomeDir(); len(home) == 0 {
|
||||
// Backwards compatibility of home directly not found error being returned.
|
||||
// This error is too verbose, failure when opening the file would of been
|
||||
// a better error to return.
|
||||
return "", ErrSharedCredentialsHomeNotFound
|
||||
}
|
||||
|
||||
p.Filename = shareddefaults.SharedCredentialsFilename()
|
||||
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
// profile returns the AWS shared credentials profile. If empty will read
|
||||
// environment variable "AWS_PROFILE". If that is not set profile will
|
||||
// return "default".
|
||||
func (p *SharedCredentialsProvider) profile() string {
|
||||
if p.Profile == "" {
|
||||
p.Profile = os.Getenv("AWS_PROFILE")
|
||||
}
|
||||
if p.Profile == "" {
|
||||
p.Profile = "default"
|
||||
}
|
||||
|
||||
return p.Profile
|
||||
}
|
@ -0,0 +1,60 @@
|
||||
// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
|
||||
//
|
||||
// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
|
||||
// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
|
||||
// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
|
||||
// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
|
||||
//
|
||||
// Loading AWS SSO credentials with the AWS shared configuration file
|
||||
//
|
||||
// You can configure AWS SSO credentials from the AWS shared configuration file by
|
||||
// specifying the required keys in the profile:
|
||||
//
|
||||
// sso_account_id
|
||||
// sso_region
|
||||
// sso_role_name
|
||||
// sso_start_url
|
||||
//
|
||||
// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target
|
||||
// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
|
||||
// provided, or an error will be returned.
|
||||
//
|
||||
// [profile devsso]
|
||||
// sso_start_url = https://my-sso-portal.awsapps.com/start
|
||||
// sso_role_name = SSOReadOnlyRole
|
||||
// sso_region = us-east-1
|
||||
// sso_account_id = 123456789012
|
||||
//
|
||||
// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
|
||||
// retrieve credentials. For example:
|
||||
//
|
||||
// sess, err := session.NewSessionWithOptions(session.Options{
|
||||
// SharedConfigState: session.SharedConfigEnable,
|
||||
// Profile: "devsso",
|
||||
// })
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Programmatically loading AWS SSO credentials directly
|
||||
//
|
||||
// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
|
||||
// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
|
||||
//
|
||||
// svc := sso.New(sess, &aws.Config{
|
||||
// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
|
||||
// })
|
||||
//
|
||||
// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
|
||||
//
|
||||
// credentials, err := provider.Get()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Additional Resources
|
||||
//
|
||||
// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
|
||||
//
|
||||
// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
|
||||
package ssocreds
|
@ -0,0 +1,10 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package ssocreds
|
||||
|
||||
import "os"
|
||||
|
||||
// getHomeDirectory returns the user's home directory as reported by the
// HOME environment variable ("" when unset).
func getHomeDirectory() string {
	home, _ := os.LookupEnv("HOME")
	return home
}
|
@ -0,0 +1,7 @@
|
||||
package ssocreds
|
||||
|
||||
import "os"
|
||||
|
||||
// getHomeDirectory returns the user's home directory as reported by the
// USERPROFILE environment variable ("" when unset).
func getHomeDirectory() string {
	home, _ := os.LookupEnv("USERPROFILE")
	return home
}
|
@ -0,0 +1,180 @@
|
||||
package ssocreds
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/service/sso"
|
||||
"github.com/aws/aws-sdk-go/service/sso/ssoiface"
|
||||
)
|
||||
|
||||
// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid.
|
||||
// To refresh the SSO session run aws sso login with the corresponding profile.
|
||||
const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"
|
||||
|
||||
const invalidTokenMessage = "the SSO session has expired or is invalid"
|
||||
|
||||
// init wires the package-level indirection points to their real
// implementations. nowTime and defaultCacheLocation are variables (rather
// than direct calls) so tests can stub the clock and the token cache
// directory.
func init() {
	nowTime = time.Now
	defaultCacheLocation = defaultCacheLocationImpl
}
|
||||
|
||||
var nowTime func() time.Time
|
||||
|
||||
// ProviderName is the name of the provider used to specify the source of credentials.
|
||||
const ProviderName = "SSOProvider"
|
||||
|
||||
var defaultCacheLocation func() string
|
||||
|
||||
func defaultCacheLocationImpl() string {
|
||||
return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
|
||||
}
|
||||
|
||||
// Provider is an AWS credential provider that retrieves temporary AWS
// credentials by exchanging an SSO login token. The embedded Expiry is
// armed by RetrieveWithContext from the service-reported expiration.
type Provider struct {
	credentials.Expiry

	// The Client which is configured for the AWS Region where the AWS SSO user portal is located.
	Client ssoiface.SSOAPI

	// The AWS account that is assigned to the user.
	AccountID string

	// The role name that is assigned to the user.
	RoleName string

	// The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
	StartURL string
}
|
||||
|
||||
// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
|
||||
// for the AWS Region where the AWS SSO user portal is located.
|
||||
func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
|
||||
return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
|
||||
// for the AWS Region where the AWS SSO user portal is located.
|
||||
func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
|
||||
p := &Provider{
|
||||
Client: client,
|
||||
AccountID: accountID,
|
||||
RoleName: roleName,
|
||||
StartURL: startURL,
|
||||
}
|
||||
|
||||
for _, fn := range optFns {
|
||||
fn(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
|
||||
// by exchanging the accessToken present in ~/.aws/sso/cache.
|
||||
func (p *Provider) Retrieve() (credentials.Value, error) {
|
||||
return p.RetrieveWithContext(aws.BackgroundContext())
|
||||
}
|
||||
|
||||
// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
// by exchanging the accessToken present in ~/.aws/sso/cache.
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	// Load (and validate) the cached SSO access token for this portal.
	tokenFile, err := loadTokenFile(p.StartURL)
	if err != nil {
		return credentials.Value{}, err
	}

	output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: &tokenFile.AccessToken,
		AccountId:   &p.AccountID,
		RoleName:    &p.RoleName,
	})
	if err != nil {
		return credentials.Value{}, err
	}

	// The service reports expiration in epoch milliseconds; convert to a
	// UTC time.Time and arm the embedded Expiry with no early-refresh
	// window.
	expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
	p.SetExpiration(expireTime, 0)

	return credentials.Value{
		AccessKeyID:     aws.StringValue(output.RoleCredentials.AccessKeyId),
		SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
		SessionToken:    aws.StringValue(output.RoleCredentials.SessionToken),
		ProviderName:    ProviderName,
	}, nil
}
|
||||
|
||||
// getCacheFileName maps a start URL to its token cache file name: the
// lowercase hex SHA-1 of the URL plus a ".json" extension.
func getCacheFileName(url string) (string, error) {
	digest := sha1.New()
	if _, err := digest.Write([]byte(url)); err != nil {
		return "", err
	}
	sum := hex.EncodeToString(digest.Sum(nil))
	return strings.ToLower(sum) + ".json", nil
}
|
||||
|
||||
// rfc3339 is a time.Time that unmarshals from a JSON string containing an
// RFC 3339 timestamp.
type rfc3339 time.Time

// UnmarshalJSON decodes a quoted RFC 3339 timestamp into r, rejecting
// non-string JSON values and unparsable timestamps.
func (r *rfc3339) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		return fmt.Errorf("expected RFC3339 timestamp: %v", err)
	}
	*r = rfc3339(parsed)
	return nil
}
|
||||
|
||||
// token models a cached AWS SSO access token document from
// ~/.aws/sso/cache.
type token struct {
	// AccessToken is the bearer token exchanged for role credentials.
	AccessToken string `json:"accessToken"`
	// ExpiresAt is the token's RFC 3339 expiration timestamp.
	ExpiresAt rfc3339 `json:"expiresAt"`
	Region    string  `json:"region,omitempty"`
	StartURL  string  `json:"startUrl,omitempty"`
}

// Expired reports whether the cached token's expiresAt is in the past,
// using the stubbable nowTime clock. Round(0) strips the monotonic clock
// reading so only wall time is compared.
func (t token) Expired() bool {
	return nowTime().Round(0).After(time.Time(t.ExpiresAt))
}
|
||||
|
||||
// loadTokenFile reads and validates the cached SSO token for startURL.
//
// The cache file lives in defaultCacheLocation() under a name derived from
// the start URL (see getCacheFileName). Any failure — unreadable file,
// malformed JSON, empty access token, or an expired token — is reported as
// an ErrCodeSSOProviderInvalidToken error.
func loadTokenFile(startURL string) (t token, err error) {
	key, err := getCacheFileName(startURL)
	if err != nil {
		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
	}

	fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
	if err != nil {
		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
	}

	if err := json.Unmarshal(fileBytes, &t); err != nil {
		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
	}

	if len(t.AccessToken) == 0 {
		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
	}

	if t.Expired() {
		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
	}

	return t, nil
}
|
@ -0,0 +1,57 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// StaticProviderName provides a name of Static provider
|
||||
const StaticProviderName = "StaticProvider"
|
||||
|
||||
var (
|
||||
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
||||
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||
)
|
||||
|
||||
// A StaticProvider is a set of credentials which are set programmatically,
|
||||
// and will never expire.
|
||||
type StaticProvider struct {
|
||||
Value
|
||||
}
|
||||
|
||||
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a static credentials value provider. Token is only required
|
||||
// for temporary security credentials retrieved via STS, otherwise an empty
|
||||
// string can be passed for this parameter.
|
||||
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: token,
|
||||
}})
|
||||
}
|
||||
|
||||
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
|
||||
// wrapping the static credentials value provide. Same as NewStaticCredentials
|
||||
// but takes the creds Value instead of individual fields
|
||||
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: creds})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials or error if the credentials are invalid.
|
||||
func (s *StaticProvider) Retrieve() (Value, error) {
|
||||
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
||||
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
|
||||
}
|
||||
|
||||
if len(s.Value.ProviderName) == 0 {
|
||||
s.Value.ProviderName = StaticProviderName
|
||||
}
|
||||
return s.Value, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
//
// For StaticProvider, the credentials never expire, so this always
// reports false.
func (s *StaticProvider) IsExpired() bool {
	return false
}
|
367
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
367
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
@ -0,0 +1,367 @@
|
||||
/*
|
||||
Package stscreds are credential Providers to retrieve STS AWS credentials.
|
||||
|
||||
STS provides multiple ways to retrieve credentials which can be used when making
|
||||
future AWS service API operation calls.
|
||||
|
||||
The SDK will ensure that per instance of credentials.Credentials all requests
|
||||
to refresh the credentials will be synchronized. But, the SDK is unable to
|
||||
ensure synchronous usage of the AssumeRoleProvider if the value is shared
|
||||
between multiple Credentials, Sessions or service clients.
|
||||
|
||||
Assume Role
|
||||
|
||||
To assume an IAM role using STS with the SDK you can create a new Credentials
|
||||
with the SDKs's stscreds package.
|
||||
|
||||
// Initial credentials loaded from SDK's default credential chain. Such as
|
||||
// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
|
||||
// Role. These credentials will be used to make the STS Assume Role API.
|
||||
sess := session.Must(session.NewSession())
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn")
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
Assume Role with static MFA Token
|
||||
|
||||
To assume an IAM role with a MFA token you can either specify a MFA token code
|
||||
directly or provide a function to prompt the user each time the credentials
|
||||
need to refresh the role's credentials. Specifying the TokenCode should be used
|
||||
for short lived operations that will not need to be refreshed, and when you do
|
||||
not want to have direct control over the user provides their MFA token.
|
||||
|
||||
With TokenCode the AssumeRoleProvider will be not be able to refresh the role's
|
||||
credentials.
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN using the MFA token code provided.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
||||
p.TokenCode = aws.String("00000000")
|
||||
})
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
Assume Role with MFA Token Provider
|
||||
|
||||
To assume an IAM role with MFA for longer running tasks where the credentials
|
||||
may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
|
||||
will allow the credential provider to prompt for new MFA token code when the
|
||||
role's credentials need to be refreshed.
|
||||
|
||||
The StdinTokenProvider function is available to prompt on stdin to retrieve
|
||||
the MFA token code from the user. You can also implement custom prompts by
|
||||
satisfying the TokenProvider function signature.
|
||||
|
||||
Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
|
||||
have undesirable results as the StdinTokenProvider will not be synchronized. A
|
||||
single Credentials with an AssumeRoleProvider can be shared safely.
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
||||
p.TokenProvider = stscreds.StdinTokenProvider
|
||||
})
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
*/
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
|
||||
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
// Use this function to read MFA tokens from stdin. The function makes no attempt
// to make atomic prompts from stdin across multiple goroutines.
//
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
// have undesirable results as the StdinTokenProvider will not be synchronized. A
// single Credentials with an AssumeRoleProvider can be shared safely.
//
// Will wait forever until something is provided on the stdin.
func StdinTokenProvider() (string, error) {
	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
	var code string
	_, err := fmt.Scanln(&code)
	return code, err
}
|
||||
|
||||
// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"

// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}

// assumeRolerWithContext is an optional interface for STS clients that support
// context-aware AssumeRole calls. RetrieveWithContext detects it via a type
// assertion and prefers it over the plain AssumeRole method.
type assumeRolerWithContext interface {
	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
}

// DefaultDuration is the default amount of time (15 minutes) that the
// credentials will be valid for when no explicit Duration is configured.
var DefaultDuration = time.Duration(15) * time.Minute
|
||||
|
||||
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time.
//
// This credential provider will be used by the SDKs default credential chain
// when shared configuration is enabled, and the shared config or shared credentials
// file configure assume role. See Session docs for how to do this.
//
// AssumeRoleProvider does not provide any synchronization and it is not safe
// to share this value across multiple Credentials, Sessions, or service clients
// without also sharing the same Credentials instance.
type AssumeRoleProvider struct {
	// Embedded Expiry tracks the credentials' expiration time and supplies
	// the provider's IsExpired behavior.
	credentials.Expiry

	// STS client to make assume role request with.
	Client AssumeRoler

	// Role to be assumed.
	RoleARN string

	// Session name, if you wish to reuse the credentials elsewhere.
	RoleSessionName string

	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
	Tags []*sts.Tag

	// A list of keys for session tags that you want to set as transitive.
	// If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
	TransitiveTagKeys []*string

	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
	Duration time.Duration

	// Optional ExternalID to pass along, defaults to nil if not set.
	ExternalID *string

	// The policy plain text must be 2048 bytes or shorter. However, an internal
	// conversion compresses it into a packed binary format with a separate limit.
	// The PackedPolicySize response element indicates by percentage how close to
	// the upper size limit the policy is, with 100% equaling the maximum allowed
	// size.
	Policy *string

	// The ARNs of IAM managed policies you want to use as managed session policies.
	// The policies must exist in the same account as the role.
	//
	// This parameter is optional. You can provide up to 10 managed policy ARNs.
	// However, the plain text that you use for both inline and managed session
	// policies can't exceed 2,048 characters.
	//
	// An AWS conversion compresses the passed session policies and session tags
	// into a packed binary format that has a separate limit. Your request can fail
	// for this limit even if your plain text meets the other requirements. The
	// PackedPolicySize response element indicates by percentage how close the policies
	// and tags for your request are to the upper size limit.
	//
	// Passing policies to this operation returns new temporary credentials. The
	// resulting session's permissions are the intersection of the role's identity-based
	// policy and the session policies. You can use the role's temporary credentials
	// in subsequent AWS API calls to access resources in the account that owns
	// the role. You cannot use session policies to grant more permissions than
	// those allowed by the identity-based policy of the role that is being assumed.
	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
	// in the IAM User Guide.
	PolicyArns []*sts.PolicyDescriptorType

	// The identification number of the MFA device that is associated with the user
	// who is making the AssumeRole call. Specify this value if the trust policy
	// of the role being assumed includes a condition that requires MFA authentication.
	// The value is either the serial number for a hardware device (such as GAHT12345678)
	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
	SerialNumber *string

	// The value provided by the MFA device, if the trust policy of the role being
	// assumed requires MFA (that is, if the policy includes a condition that tests
	// for MFA). If the role being assumed requires MFA and if the TokenCode value
	// is missing or expired, the AssumeRole call returns an "access denied" error.
	//
	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
	// set an error will be returned.
	TokenCode *string

	// Async method of providing MFA token code for assuming an IAM role with MFA.
	// The value returned by the function will be used as the TokenCode in the Retrieve
	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
	//
	// This token provider will be called whenever the assumed role's
	// credentials need to be refreshed when SerialNumber is also set and
	// TokenCode is not set.
	//
	// If both TokenCode and TokenProvider is set, TokenProvider will be used and
	// TokenCode is ignored.
	TokenProvider func() (string, error)

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause request to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration

	// MaxJitterFrac reduces the effective Duration of each credential requested
	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
	// With the default MaxJitterFrac value of 0, no jitter will be used.
	//
	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
	// AssumeRole call will be made with an arbitrary Duration between 27m and
	// 30m.
	//
	// MaxJitterFrac should not be negative.
	MaxJitterFrac float64
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials value wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation. The
|
||||
// Credentials value will attempt to refresh the credentials using the provider
|
||||
// when Credentials.Get is called, if the cached credentials are expiring.
|
||||
//
|
||||
// Takes a Config provider to create the STS client. The ConfigProvider is
|
||||
// satisfied by the session.Session type.
|
||||
//
|
||||
// It is safe to share the returned Credentials with multiple Sessions and
|
||||
// service clients. All access to the credentials and refreshing them
|
||||
// will be synchronized.
|
||||
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: sts.New(c),
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation. The
|
||||
// Credentials value will attempt to refresh the credentials using the provider
|
||||
// when Credentials.Get is called, if the cached credentials are expiring.
|
||||
//
|
||||
// Takes an AssumeRoler which can be satisfied by the STS client.
|
||||
//
|
||||
// It is safe to share the returned Credentials with multiple Sessions and
|
||||
// service clients. All access to the credentials and refreshing them
|
||||
// will be synchronized.
|
||||
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: svc,
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve generates a new set of temporary credentials using STS.
// It delegates to RetrieveWithContext with a background context.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
	return p.RetrieveWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// RetrieveWithContext generates a new set of temporary credentials using STS.
//
// Defaults are applied for RoleSessionName (nanosecond timestamp) and Duration
// (DefaultDuration). When SerialNumber is set, MFA is required: an explicit
// TokenCode wins over TokenProvider, and an error is returned if neither is
// available. On success the provider's expiration is set so credentials can be
// refreshed proactively within ExpiryWindow.
func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	// Apply defaults where parameters are not set.
	if p.RoleSessionName == "" {
		// Try to work out a role name that will hopefully end up unique.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as AWS permits.
		p.Duration = DefaultDuration
	}
	// Randomly shorten the requested duration by up to MaxJitterFrac so a
	// fleet of clients does not refresh credentials in lockstep.
	jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
	input := &sts.AssumeRoleInput{
		DurationSeconds:   aws.Int64(int64((p.Duration - jitter) / time.Second)),
		RoleArn:           aws.String(p.RoleARN),
		RoleSessionName:   aws.String(p.RoleSessionName),
		ExternalId:        p.ExternalID,
		Tags:              p.Tags,
		PolicyArns:        p.PolicyArns,
		TransitiveTagKeys: p.TransitiveTagKeys,
	}
	if p.Policy != nil {
		input.Policy = p.Policy
	}
	if p.SerialNumber != nil {
		// MFA required: explicit TokenCode takes precedence over TokenProvider.
		if p.TokenCode != nil {
			input.SerialNumber = p.SerialNumber
			input.TokenCode = p.TokenCode
		} else if p.TokenProvider != nil {
			input.SerialNumber = p.SerialNumber
			code, err := p.TokenProvider()
			if err != nil {
				return credentials.Value{ProviderName: ProviderName}, err
			}
			input.TokenCode = aws.String(code)
		} else {
			return credentials.Value{ProviderName: ProviderName},
				awserr.New("AssumeRoleTokenNotAvailable",
					"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
		}
	}

	var roleOutput *sts.AssumeRoleOutput
	var err error

	// Prefer the context-aware API when the configured client supports it.
	if c, ok := p.Client.(assumeRolerWithContext); ok {
		roleOutput, err = c.AssumeRoleWithContext(ctx, input)
	} else {
		roleOutput, err = p.Client.AssumeRole(input)
	}

	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// We will proactively generate new credentials before they expire.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
		ProviderName:    ProviderName,
	}, nil
}
|
182
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
generated
vendored
182
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
generated
vendored
@ -0,0 +1,182 @@
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/aws/aws-sdk-go/service/sts/stsiface"
|
||||
)
|
||||
|
||||
const (
	// ErrCodeWebIdentity will be used as an error code when constructing
	// a new error to be returned during session creation or retrieval.
	ErrCodeWebIdentity = "WebIdentityErr"

	// WebIdentityProviderName is the web identity provider name reported in
	// credentials.Value.ProviderName.
	WebIdentityProviderName = "WebIdentityCredentials"
)

// now is used to return a time.Time object representing
// the current time. This can be used to easily test and
// compare test values.
var now = time.Now

// TokenFetcher should return WebIdentity token bytes or an error
type TokenFetcher interface {
	FetchToken(credentials.Context) ([]byte, error)
}

// FetchTokenPath is a path to a WebIdentity token file
type FetchTokenPath string
|
||||
|
||||
// FetchToken returns a token by reading from the filesystem
|
||||
func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
|
||||
data, err := ioutil.ReadFile(string(f))
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("unable to read file at %s", f)
|
||||
return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
	// Embedded Expiry tracks the credentials' expiration time and supplies
	// the provider's IsExpired behavior.
	credentials.Expiry

	// The policy ARNs to use with the web identity assumed role.
	PolicyArns []*sts.PolicyDescriptorType

	// Duration the STS credentials will be valid for. Truncated to seconds.
	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
	// expiry duration. See
	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
	// for more information.
	Duration time.Duration

	// The amount of time the credentials will be refreshed before they expire.
	// This is useful to refresh credentials before they expire to reduce risk of
	// using credentials as they expire. If unset, will default to no expiry
	// window.
	ExpiryWindow time.Duration

	// STS client used to make the AssumeRoleWithWebIdentity call.
	client stsiface.STSAPI

	// tokenFetcher supplies the OIDC/JWT token bytes at retrieval time.
	tokenFetcher TokenFetcher
	// roleARN is the role to assume with the web identity token.
	roleARN string
	// roleSessionName identifies the session; a timestamp is generated when empty.
	roleSessionName string
}
|
||||
|
||||
// NewWebIdentityCredentials will return a new set of credentials with a given
|
||||
// configuration, role arn, and token file path.
|
||||
//
|
||||
// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
|
||||
// functional options, and wrap with credentials.NewCredentials helper.
|
||||
func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
|
||||
svc := sts.New(c)
|
||||
p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
|
||||
// provided stsiface.STSAPI
|
||||
//
|
||||
// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
|
||||
// functional options.
|
||||
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
|
||||
return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
|
||||
}
|
||||
|
||||
// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
|
||||
// provided stsiface.STSAPI and a TokenFetcher
|
||||
//
|
||||
// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
|
||||
// functional options.
|
||||
func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
|
||||
return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
|
||||
}
|
||||
|
||||
// NewWebIdentityRoleProviderWithOptions will return an initialize
|
||||
// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
|
||||
// TokenFetcher. Additional options can be provided as functional options.
|
||||
//
|
||||
// TokenFetcher is the implementation that will retrieve the JWT token from to
|
||||
// assume the role with. Use the provided FetchTokenPath implementation to
|
||||
// retrieve the JWT token using a file system path.
|
||||
func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
|
||||
p := WebIdentityRoleProvider{
|
||||
client: svc,
|
||||
tokenFetcher: tokenFetcher,
|
||||
roleARN: roleARN,
|
||||
roleSessionName: roleSessionName,
|
||||
}
|
||||
|
||||
for _, fn := range optFns {
|
||||
fn(&p)
|
||||
}
|
||||
|
||||
return &p
|
||||
}
|
||||
|
||||
// Retrieve will attempt to assume a role from a token which is located at
// 'WebIdentityTokenFilePath' specified destination and if that is empty an
// error will be returned. It delegates to RetrieveWithContext with a
// background context.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
	return p.RetrieveWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// RetrieveWithContext will attempt to assume a role from a token which is
// located at 'WebIdentityTokenFilePath' specified destination and if that is
// empty an error will be returned.
//
// The token is obtained from the configured TokenFetcher, a session name is
// defaulted to a nanosecond timestamp when unset, and the request retries on
// the transient InvalidIdentityToken error. On success the provider's
// expiration is set so credentials refresh within ExpiryWindow.
func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	b, err := p.tokenFetcher.FetchToken(ctx)
	if err != nil {
		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
	}

	sessionName := p.roleSessionName
	if len(sessionName) == 0 {
		// session name is used to uniquely identify a session. This simply
		// uses unix time in nanoseconds to uniquely identify sessions.
		sessionName = strconv.FormatInt(now().UnixNano(), 10)
	}

	// A zero Duration leaves DurationSeconds nil so STS applies its default.
	var duration *int64
	if p.Duration != 0 {
		duration = aws.Int64(int64(p.Duration / time.Second))
	}

	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
		PolicyArns:       p.PolicyArns,
		RoleArn:          &p.roleARN,
		RoleSessionName:  &sessionName,
		WebIdentityToken: aws.String(string(b)),
		DurationSeconds:  duration,
	})

	req.SetContext(ctx)

	// InvalidIdentityToken error is a temporary error that can occur
	// when assuming an Role with a JWT web identity token.
	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
	if err := req.Send(); err != nil {
		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
	}

	// Record expiration so the credentials refresh proactively.
	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)

	value := credentials.Value{
		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
		ProviderName:    WebIdentityProviderName,
	}
	return value, nil
}
|
@ -0,0 +1,69 @@
|
||||
// Package csm provides the Client Side Monitoring (CSM) client which enables
|
||||
// sending metrics via UDP connection to the CSM agent. This package provides
|
||||
// control options, and configuration for the CSM client. The client can be
|
||||
// controlled manually, or automatically via the SDK's Session configuration.
|
||||
//
|
||||
// Enabling CSM client via SDK's Session configuration
|
||||
//
|
||||
// The CSM client can be enabled automatically via SDK's Session configuration.
|
||||
// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
|
||||
// environment variable is set to a non-empty value.
|
||||
//
|
||||
// The configuration options for the CSM client via the SDK's session
|
||||
// configuration are:
|
||||
//
|
||||
// * AWS_CSM_PORT=<port number>
|
||||
// The port number the CSM agent will receive metrics on.
|
||||
//
|
||||
// * AWS_CSM_HOST=<hostname or ip>
|
||||
// The hostname, or IP address the CSM agent will receive metrics on.
|
||||
// Without port number.
|
||||
//
|
||||
// Manually enabling the CSM client
|
||||
//
|
||||
// The CSM client can be started, paused, and resumed manually. The Start
|
||||
// function will enable the CSM client to publish metrics to the CSM agent. It
|
||||
// is safe to call Start concurrently, but if Start is called additional times
|
||||
// with different ClientID or address it will panic.
|
||||
//
|
||||
// r, err := csm.Start("clientID", ":31000")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed starting CSM: %v", err))
|
||||
// }
|
||||
//
|
||||
// When controlling the CSM client manually, you must also inject its request
|
||||
// handlers into the SDK's Session configuration for the SDK's API clients to
|
||||
// publish metrics.
|
||||
//
|
||||
// sess, err := session.NewSession(&aws.Config{})
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed loading session: %v", err))
|
||||
// }
|
||||
//
|
||||
// // Add CSM client's metric publishing request handlers to the SDK's
|
||||
// // Session Configuration.
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// Controlling CSM client
|
||||
//
|
||||
// Once the CSM client has been enabled the Get function will return a Reporter
|
||||
// value that you can use to pause and resume the metrics published to the CSM
|
||||
// agent. If Get function is called before the reporter is enabled with the
|
||||
// Start function or via SDK's Session configuration nil will be returned.
|
||||
//
|
||||
// The Pause method can be called to stop the CSM client publishing metrics to
|
||||
// the CSM agent. The Continue method will resume metric publishing.
|
||||
//
|
||||
// // Get the CSM client Reporter.
|
||||
// r := csm.Get()
|
||||
//
|
||||
// // Will pause monitoring
|
||||
// r.Pause()
|
||||
// resp, err = client.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
//
|
||||
// // Resume monitoring
|
||||
// r.Continue()
|
||||
package csm
|
@ -0,0 +1,89 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
	// lock guards the package-level sender Reporter singleton.
	lock sync.Mutex
)
|
||||
|
||||
const (
	// DefaultPort is used when no port is specified.
	DefaultPort = "31000"

	// DefaultHost is the host that will be used when none is specified.
	DefaultHost = "127.0.0.1"
)

// AddressWithDefaults returns a CSM address built from the host and port
// values. If the host or port is not set, default values will be used
// instead. If host is "localhost" it will be replaced with "127.0.0.1".
func AddressWithDefaults(host, port string) string {
	if host == "" || strings.EqualFold(host, "localhost") {
		host = DefaultHost
	}
	if port == "" {
		port = DefaultPort
	}

	// An IPv6 literal is the only host form containing a colon, and it must
	// be bracketed when joined with a port.
	if strings.Contains(host, ":") {
		return "[" + host + "]:" + port
	}
	return host + ":" + port
}
|
||||
|
||||
// Start will start a long running go routine to capture
|
||||
// client side metrics. Calling start multiple time will only
|
||||
// start the metric listener once and will panic if a different
|
||||
// client ID or port is passed in.
|
||||
//
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:31000")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(sess.Handlers)
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
func Start(clientID string, url string) (*Reporter, error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if sender == nil {
|
||||
sender = newReporter(clientID, url)
|
||||
} else {
|
||||
if sender.clientID != clientID {
|
||||
panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
|
||||
}
|
||||
|
||||
if sender.url != url {
|
||||
panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
|
||||
}
|
||||
}
|
||||
|
||||
if err := connect(url); err != nil {
|
||||
sender = nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sender, nil
|
||||
}
|
||||
|
||||
// Get will return a reporter if one exists, if one does not exist, nil will
|
||||
// be returned.
|
||||
func Get() *Reporter {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
return sender
|
||||
}
|
@ -0,0 +1,109 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
type metricTime time.Time
|
||||
|
||||
func (t metricTime) MarshalJSON() ([]byte, error) {
|
||||
ns := time.Duration(time.Time(t).UnixNano())
|
||||
return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
|
||||
}
|
||||
|
||||
// metric is the JSON document published to the CSM agent for API call and
// API call attempt events. All fields are pointers so unset values are
// omitted from the serialized payload via the omitempty tags.
type metric struct {
	ClientID  *string     `json:"ClientId,omitempty"`
	API       *string     `json:"Api,omitempty"`
	Service   *string     `json:"Service,omitempty"`
	Timestamp *metricTime `json:"Timestamp,omitempty"`
	Type      *string     `json:"Type,omitempty"`
	Version   *int        `json:"Version,omitempty"`

	AttemptCount *int `json:"AttemptCount,omitempty"`
	Latency      *int `json:"Latency,omitempty"`

	Fqdn           *string `json:"Fqdn,omitempty"`
	UserAgent      *string `json:"UserAgent,omitempty"`
	AttemptLatency *int    `json:"AttemptLatency,omitempty"`

	SessionToken   *string `json:"SessionToken,omitempty"`
	Region         *string `json:"Region,omitempty"`
	AccessKey      *string `json:"AccessKey,omitempty"`
	HTTPStatusCode *int    `json:"HttpStatusCode,omitempty"`
	XAmzID2        *string `json:"XAmzId2,omitempty"`
	XAmzRequestID  *string `json:"XAmznRequestId,omitempty"`

	// Per-attempt exception details; see SetException.
	AWSException        *string `json:"AwsException,omitempty"`
	AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
	SDKException        *string `json:"SdkException,omitempty"`
	SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`

	// Final outcome of the overall API call; see SetFinalException.
	FinalHTTPStatusCode      *int    `json:"FinalHttpStatusCode,omitempty"`
	FinalAWSException        *string `json:"FinalAwsException,omitempty"`
	FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
	FinalSDKException        *string `json:"FinalSdkException,omitempty"`
	FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`

	DestinationIP    *string `json:"DestinationIp,omitempty"`
	ConnectionReused *int    `json:"ConnectionReused,omitempty"`

	AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
	ConnectLatency           *int `json:"ConnectLatency,omitempty"`
	RequestLatency           *int `json:"RequestLatency,omitempty"`
	DNSLatency               *int `json:"DnsLatency,omitempty"`
	TCPLatency               *int `json:"TcpLatency,omitempty"`
	SSLLatency               *int `json:"SslLatency,omitempty"`

	MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}
|
||||
|
||||
func (m *metric) TruncateFields() {
|
||||
m.ClientID = truncateString(m.ClientID, 255)
|
||||
m.UserAgent = truncateString(m.UserAgent, 256)
|
||||
|
||||
m.AWSException = truncateString(m.AWSException, 128)
|
||||
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
|
||||
|
||||
m.SDKException = truncateString(m.SDKException, 128)
|
||||
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
|
||||
|
||||
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
|
||||
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
|
||||
|
||||
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
|
||||
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
|
||||
}
|
||||
|
||||
// truncateString returns v unchanged when it is nil or already within l
// bytes; otherwise it returns a pointer to the first l bytes of *v.
func truncateString(v *string, l int) *string {
	if v == nil || len(*v) <= l {
		return v
	}
	trimmed := (*v)[:l]
	return &trimmed
}
|
||||
|
||||
// SetException records e on the metric's per-attempt exception fields,
// choosing the AWS or SDK field pair based on e's concrete type. Unknown
// types are ignored.
func (m *metric) SetException(e metricException) {
	switch te := e.(type) {
	case awsException:
		m.AWSException = aws.String(te.exception)
		m.AWSExceptionMessage = aws.String(te.message)
	case sdkException:
		m.SDKException = aws.String(te.exception)
		m.SDKExceptionMessage = aws.String(te.message)
	}
}
|
||||
|
||||
// SetFinalException records e on the metric's final-outcome exception
// fields, choosing the AWS or SDK field pair based on e's concrete type.
// Unknown types are ignored.
func (m *metric) SetFinalException(e metricException) {
	switch te := e.(type) {
	case awsException:
		m.FinalAWSException = aws.String(te.exception)
		m.FinalAWSExceptionMessage = aws.String(te.message)
	case sdkException:
		m.FinalSDKException = aws.String(te.exception)
		m.FinalSDKExceptionMessage = aws.String(te.message)
	}
}
|
@ -0,0 +1,55 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Pause-state values stored atomically in metricChan.paused.
const (
	runningEnum = iota
	pausedEnum
)

var (
	// MetricsChannelSize of metrics to hold in the channel
	MetricsChannelSize = 100
)
|
||||
|
||||
// metricChan is a bounded, pausable queue of metrics awaiting publication.
type metricChan struct {
	ch chan metric
	// paused holds runningEnum or pausedEnum; accessed atomically so Push
	// can check the state without locking.
	paused *int64
}
|
||||
|
||||
func newMetricChan(size int) metricChan {
|
||||
return metricChan{
|
||||
ch: make(chan metric, size),
|
||||
paused: new(int64),
|
||||
}
|
||||
}
|
||||
|
||||
// Pause stops metrics from being accepted by Push until Continue is called.
func (ch *metricChan) Pause() {
	atomic.StoreInt64(ch.paused, pausedEnum)
}

// Continue resumes accepting metrics after a Pause.
func (ch *metricChan) Continue() {
	atomic.StoreInt64(ch.paused, runningEnum)
}

// IsPaused reports whether the channel is currently rejecting pushes.
func (ch *metricChan) IsPaused() bool {
	v := atomic.LoadInt64(ch.paused)
	return v == pausedEnum
}
|
||||
|
||||
// Push will push metrics to the metric channel if the channel
|
||||
// is not paused
|
||||
func (ch *metricChan) Push(m metric) bool {
|
||||
if ch.IsPaused() {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case ch.ch <- m:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
@ -0,0 +1,26 @@
|
||||
package csm
|
||||
|
||||
// metricException describes an error captured in a CSM metric: an exception
// code plus a human-readable message.
type metricException interface {
	Exception() string
	Message() string
}

// requestException is the shared implementation embedded by the concrete
// exception kinds below.
type requestException struct {
	exception string
	message   string
}

// Exception returns the exception/error code.
func (e requestException) Exception() string { return e.exception }

// Message returns the human-readable error message.
func (e requestException) Message() string { return e.message }

// awsException marks an error returned by the AWS service.
type awsException struct {
	requestException
}

// sdkException marks an error generated within the SDK itself.
type sdkException struct {
	requestException
}
|
@ -0,0 +1,264 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// Reporter will gather metrics of API requests made and
// send those metrics to the CSM endpoint.
type Reporter struct {
	clientID  string        // client ID attached to every published metric
	url       string        // agent address the reporter connects to
	conn      net.Conn      // UDP connection to the CSM agent
	metricsCh metricChan    // pausable buffer of pending metrics
	done      chan struct{} // created once by connect; nil until first successful connection
}

var (
	// sender is the package-level Reporter singleton managed by Start/Get
	// under lock.
	sender *Reporter
)
|
||||
|
||||
func connect(url string) error {
|
||||
const network = "udp"
|
||||
if err := sender.connect(network, url); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sender.done == nil {
|
||||
sender.done = make(chan struct{})
|
||||
go sender.start()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newReporter(clientID, url string) *Reporter {
|
||||
return &Reporter{
|
||||
clientID: clientID,
|
||||
url: url,
|
||||
metricsCh: newMetricChan(MetricsChannelSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
creds, _ := r.Config.Credentials.Get()
|
||||
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Region: r.Config.Region,
|
||||
Type: aws.String("ApiCallAttempt"),
|
||||
Version: aws.Int(1),
|
||||
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
|
||||
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
|
||||
AccessKey: aws.String(creds.AccessKeyID),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
m.SetException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func getMetricException(err awserr.Error) metricException {
|
||||
msg := err.Error()
|
||||
code := err.Code()
|
||||
|
||||
switch code {
|
||||
case request.ErrCodeRequestError,
|
||||
request.ErrCodeSerialization,
|
||||
request.CanceledErrorCode:
|
||||
return sdkException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
default:
|
||||
return awsException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Type: aws.String("ApiCall"),
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
Region: r.Config.Region,
|
||||
Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
m.SetFinalException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
|
||||
// TODO: Probably want to figure something out for logging dropped
|
||||
// metrics
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func (rep *Reporter) connect(network, url string) error {
|
||||
if rep.conn != nil {
|
||||
rep.conn.Close()
|
||||
}
|
||||
|
||||
conn, err := net.Dial(network, url)
|
||||
if err != nil {
|
||||
return awserr.New("UDPError", "Could not connect", err)
|
||||
}
|
||||
|
||||
rep.conn = conn
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// close signals the background publish loop to exit (if one was
// started) and pauses the metric channel so no further metrics are
// queued.
func (rep *Reporter) close() {
	if rep.done != nil {
		close(rep.done)
	}

	rep.metricsCh.Pause()
}
|
||||
|
||||
// start is the background publish loop: it drains metrics from the
// channel, JSON-encodes each one, and writes it to the agent
// connection until the done channel is closed. The metric channel is
// paused on exit so producers stop queueing.
func (rep *Reporter) start() {
	defer func() {
		rep.metricsCh.Pause()
	}()

	for {
		select {
		case <-rep.done:
			rep.done = nil
			return
		case m := <-rep.metricsCh.ch:
			// TODO: What to do with this error? Probably should just log
			b, err := json.Marshal(m)
			if err != nil {
				continue
			}

			// Delivery is best effort; the write error is discarded.
			rep.conn.Write(b)
		}
	}
}
|
||||
|
||||
// Pause will pause the metric channel preventing any new metrics from being
// added. It is safe to call concurrently with other calls to Pause, but if
// called concurrently with Continue can lead to unexpected state.
func (rep *Reporter) Pause() {
	lock.Lock()
	defer lock.Unlock()

	// The nil check happens under the lock so Pause serializes with
	// Continue, which takes the same lock.
	if rep == nil {
		return
	}

	rep.close()
}
|
||||
|
||||
// Continue will reopen the metric channel and allow for monitoring to be
// resumed. It is safe to call concurrently with other calls to Continue, but
// if called concurrently with Pause can lead to unexpected state.
func (rep *Reporter) Continue() {
	lock.Lock()
	defer lock.Unlock()
	if rep == nil {
		return
	}

	// Already running; nothing to do.
	if !rep.metricsCh.IsPaused() {
		return
	}

	rep.metricsCh.Continue()
}
|
||||
|
||||
// Client side metric handler names. These identify the handlers that
// InjectHandlers registers, so they can be located or removed by name.
const (
	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
)
|
||||
|
||||
// InjectHandlers will will enable client side metrics and inject the proper
|
||||
// handlers to handle how metrics are sent.
|
||||
//
|
||||
// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
|
||||
// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
|
||||
//
|
||||
// // Start must be called in order to inject the correct handlers
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:8094")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
//
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// // create a new service client with our client side metric session
|
||||
// svc := s3.New(sess)
|
||||
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
handlers.Complete.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallMetricHandlerName,
|
||||
Fn: rep.sendAPICallMetric,
|
||||
})
|
||||
|
||||
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallAttemptMetricHandlerName,
|
||||
Fn: rep.sendAPICallAttemptMetric,
|
||||
})
|
||||
}
|
||||
|
||||
// boolIntValue return 1 for true and 0 for false.
func boolIntValue(b bool) int {
	if !b {
		return 0
	}
	return 1
}
|
@ -0,0 +1,207 @@
|
||||
// Package defaults is a collection of helpers to retrieve the SDK's default
|
||||
// configuration and handlers.
|
||||
//
|
||||
// Generally this package shouldn't be used directly, but session.Session
|
||||
// instead. This package is useful when you need to reset the defaults
|
||||
// of a session or service client to the SDK defaults before setting
|
||||
// additional parameters.
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// A Defaults provides a collection of default values for SDK clients.
type Defaults struct {
	Config   *aws.Config      // default client configuration, credentials included by Get
	Handlers request.Handlers // default request lifecycle handlers
}

// Get returns the SDK's default values with Config and handlers pre-configured.
// The credential chain is constructed from the freshly built config and
// handlers before being attached to the config.
func Get() Defaults {
	cfg := Config()
	handlers := Handlers()
	cfg.Credentials = CredChain(cfg, handlers)

	return Defaults{
		Config:   cfg,
		Handlers: handlers,
	}
}
|
||||
|
||||
// Config returns the default configuration without credentials.
|
||||
// To retrieve a config with credentials also included use
|
||||
// `defaults.Get().Config` instead.
|
||||
//
|
||||
// Generally you shouldn't need to use this method directly, but
|
||||
// is available if you need to reset the configuration of an
|
||||
// existing service client or session.
|
||||
func Config() *aws.Config {
|
||||
return aws.NewConfig().
|
||||
WithCredentials(credentials.AnonymousCredentials).
|
||||
WithRegion(os.Getenv("AWS_REGION")).
|
||||
WithHTTPClient(http.DefaultClient).
|
||||
WithMaxRetries(aws.UseServiceDefaultRetries).
|
||||
WithLogger(aws.NewDefaultLogger()).
|
||||
WithLogLevel(aws.LogOff).
|
||||
WithEndpointResolver(endpoints.DefaultResolver())
|
||||
}
|
||||
|
||||
// Handlers returns the default request handlers.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the request handlers of an
// existing service client or session.
func Handlers() request.Handlers {
	var handlers request.Handlers

	// Validate phase: stop running the list as soon as a handler errors.
	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
	// Build phase: add SDK version and host execution environment to
	// the User-Agent; also stop on first error.
	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
	handlers.Build.AfterEachFn = request.HandlerListStopOnError
	// Sign, send, retry, and response-validation phases.
	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
	handlers.Send.PushBackNamed(corehandlers.SendHandler)
	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)

	return handlers
}
|
||||
|
||||
// CredChain returns the default credential chain.
|
||||
//
|
||||
// Generally you shouldn't need to use this method directly, but
|
||||
// is available if you need to reset the credentials of an
|
||||
// existing service client or session's Config.
|
||||
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
|
||||
return credentials.NewCredentials(&credentials.ChainProvider{
|
||||
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
||||
Providers: CredProviders(cfg, handlers),
|
||||
})
|
||||
}
|
||||
|
||||
// CredProviders returns the slice of providers used in
|
||||
// the default credential chain.
|
||||
//
|
||||
// For applications that need to use some other provider (for example use
|
||||
// different environment variables for legacy reasons) but still fall back
|
||||
// on the default chain of providers. This allows that default chaint to be
|
||||
// automatically updated
|
||||
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
|
||||
return []credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
RemoteCredProvider(*cfg, handlers),
|
||||
}
|
||||
}
|
||||
|
||||
const (
	// httpProviderAuthorizationEnvVar names the env var holding the
	// optional authorization token sent to the HTTP credential endpoint.
	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
	// httpProviderEnvVar names the env var holding the full URI of a
	// local HTTP credential endpoint.
	httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
|
||||
|
||||
// RemoteCredProvider returns a credentials provider for the default remote
|
||||
// endpoints such as EC2 or ECS Roles.
|
||||
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||
if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
|
||||
return localHTTPCredProvider(cfg, handlers, u)
|
||||
}
|
||||
|
||||
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
|
||||
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
|
||||
return httpCredProvider(cfg, handlers, u)
|
||||
}
|
||||
|
||||
return ec2RoleProvider(cfg, handlers)
|
||||
}
|
||||
|
||||
// lookupHostFn resolves a hostname to addresses; declared as a
// variable so tests can stub DNS resolution.
var lookupHostFn = net.LookupHost

// isLoopbackHost reports whether host (an IP literal or a hostname)
// refers only to loopback addresses. Hostnames are resolved via
// lookupHostFn, and any resolution error is returned.
func isLoopbackHost(host string) (bool, error) {
	// IP literals are decided without a lookup.
	if ip := net.ParseIP(host); ip != nil {
		return ip.IsLoopback(), nil
	}

	// Host is not an IP literal; every resolved address must be loopback.
	addrs, err := lookupHostFn(host)
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		if !net.ParseIP(addr).IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}
|
||||
|
||||
// localHTTPCredProvider validates that u is a well-formed URL whose
// host is a loopback address before building an HTTP credential
// provider for it. Any validation failure yields an ErrorProvider that
// surfaces the problem on first credential retrieval instead of at
// construction time.
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	var errMsg string

	parsed, err := url.Parse(u)
	if err != nil {
		errMsg = fmt.Sprintf("invalid URL, %v", err)
	} else {
		host := aws.URLHostname(parsed)
		if len(host) == 0 {
			errMsg = "unable to parse host from local HTTP cred provider URL"
		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
		} else if !isLoopback {
			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
		}
	}

	if len(errMsg) > 0 {
		if cfg.Logger != nil {
			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
		}
		// Note: err here is only the url.Parse error; for loopback
		// failures the underlying cause is carried in errMsg, and the
		// wrapped err is nil.
		return credentials.ErrorProvider{
			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
			ProviderName: endpointcreds.ProviderName,
		}
	}

	return httpCredProvider(cfg, handlers, u)
}
|
||||
|
||||
// httpCredProvider builds a credentials provider backed by the HTTP
// endpoint u, configured with a 5 minute expiry window and the
// optional authorization token read from the environment.
func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	return endpointcreds.NewProviderClient(cfg, handlers, u,
		func(p *endpointcreds.Provider) {
			p.ExpiryWindow = 5 * time.Minute
			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
		},
	)
}
|
||||
|
||||
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||
resolver := cfg.EndpointResolver
|
||||
if resolver == nil {
|
||||
resolver = endpoints.DefaultResolver()
|
||||
}
|
||||
|
||||
e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
|
||||
return &ec2rolecreds.EC2RoleProvider{
|
||||
Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
|
||||
ExpiryWindow: 5 * time.Minute,
|
||||
}
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// SharedCredentialsFilename returns the SDK's default file path
// for the shared credentials file.
//
// Builds the shared config file path based on the OS's platform.
//
//   - Linux/Unix: $HOME/.aws/credentials
//   - Windows: %USERPROFILE%\.aws\credentials
func SharedCredentialsFilename() string {
	// Thin wrapper so callers need not import the internal
	// shareddefaults package directly.
	return shareddefaults.SharedCredentialsFilename()
}
|
||||
|
||||
// SharedConfigFilename returns the SDK's default file path for
// the shared config file.
//
// Builds the shared config file path based on the OS's platform.
//
//   - Linux/Unix: $HOME/.aws/config
//   - Windows: %USERPROFILE%\.aws\config
func SharedConfigFilename() string {
	// Thin wrapper so callers need not import the internal
	// shareddefaults package directly.
	return shareddefaults.SharedConfigFilename()
}
|
@ -0,0 +1,56 @@
|
||||
// Package aws provides the core SDK's utilities and shared types. Use this package's
|
||||
// utilities to simplify setting and reading API operations parameters.
|
||||
//
|
||||
// Value and Pointer Conversion Utilities
|
||||
//
|
||||
// This package includes a helper conversion utility for each scalar type the SDK's
|
||||
// API use. These utilities make getting a pointer of the scalar, and dereferencing
|
||||
// a pointer easier.
|
||||
//
|
||||
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
|
||||
// The Pointer to value will safely dereference the pointer and return its value.
|
||||
// If the pointer was nil, the scalar's zero value will be returned.
|
||||
//
|
||||
// The value to pointer functions will be named after the scalar type. So get a
|
||||
// *string from a string value use the "String" function. This makes it easy
// to get a pointer of a literal string value, because getting the address of a
|
||||
// literal requires assigning the value to a variable first.
|
||||
//
|
||||
// var strPtr *string
|
||||
//
|
||||
// // Without the SDK's conversion functions
|
||||
// str := "my string"
|
||||
// strPtr = &str
|
||||
//
|
||||
// // With the SDK's conversion functions
|
||||
// strPtr = aws.String("my string")
|
||||
//
|
||||
// // Convert *string to string value
|
||||
// str = aws.StringValue(strPtr)
|
||||
//
|
||||
// In addition to scalars the aws package also includes conversion utilities for
|
||||
// map and slice for commonly types used in API parameters. The map and slice
|
||||
// conversion functions use similar naming pattern as the scalar conversion
|
||||
// functions.
|
||||
//
|
||||
// var strPtrs []*string
|
||||
// var strs []string = []string{"Go", "Gophers", "Go"}
|
||||
//
|
||||
// // Convert []string to []*string
|
||||
// strPtrs = aws.StringSlice(strs)
|
||||
//
|
||||
// // Convert []*string to []string
|
||||
// strs = aws.StringValueSlice(strPtrs)
|
||||
//
|
||||
// SDK Default HTTP Client
|
||||
//
|
||||
// The SDK will use the http.DefaultClient if a HTTP client is not provided to
|
||||
// the SDK's Session, or service client constructor. This means that if the
|
||||
// http.DefaultClient is modified by other components of your application the
|
||||
// modifications will be picked up by the SDK as well.
|
||||
//
|
||||
// In some cases this might be intended, but it is a better practice to create
|
||||
// a custom HTTP Client to share explicitly through your application. You can
|
||||
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
|
||||
// value of the SDK's Config type when creating a Session or service client.
|
||||
package aws
|
@ -0,0 +1,250 @@
|
||||
package ec2metadata
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// getToken uses the duration to return a token for EC2 metadata service,
// or an error if the request failed.
func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) {
	op := &request.Operation{
		Name:       "GetToken",
		HTTPMethod: "PUT",
		HTTPPath:   "/latest/api/token",
	}

	var output tokenOutput
	req := c.NewRequest(op, nil, &output)
	req.SetContext(ctx)

	// remove the fetch token handler from the request handlers to avoid infinite recursion
	req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)

	// Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request.
	req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler)

	// Request the token TTL in whole seconds via the TTL header.
	ttl := strconv.FormatInt(int64(duration/time.Second), 10)
	req.HTTPRequest.Header.Set(ttlHeader, ttl)

	err := req.Send()

	// Errors with bad request status should be returned.
	if err != nil {
		// NOTE(review): assumes req.HTTPResponse is non-nil whenever
		// Send returns an error — confirm for transport-level failures.
		err = awserr.NewRequestFailure(
			awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err),
			req.HTTPResponse.StatusCode, req.RequestID)
	}

	return output, err
}
|
||||
|
||||
// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
//
// It is a convenience wrapper around GetMetadataWithContext using the
// background context.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
	return c.GetMetadataWithContext(aws.BackgroundContext(), p)
}
|
||||
|
||||
// GetMetadataWithContext uses the path provided to request information from the EC2
|
||||
// instance metadata service. The content will be returned as a string, or
|
||||
// error if the request failed.
|
||||
func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetMetadata",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: sdkuri.PathJoin("/latest/meta-data", p),
|
||||
}
|
||||
output := &metadataOutput{}
|
||||
|
||||
req := c.NewRequest(op, nil, output)
|
||||
|
||||
req.SetContext(ctx)
|
||||
|
||||
err := req.Send()
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetUserData returns the userdata that was configured for the service. If
// there is no user-data setup for the EC2 instance a "NotFoundError" error
// code will be returned.
//
// It is a convenience wrapper around GetUserDataWithContext using the
// background context.
func (c *EC2Metadata) GetUserData() (string, error) {
	return c.GetUserDataWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// GetUserDataWithContext returns the userdata that was configured for the service. If
|
||||
// there is no user-data setup for the EC2 instance a "NotFoundError" error
|
||||
// code will be returned.
|
||||
func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetUserData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: "/latest/user-data",
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
req.SetContext(ctx)
|
||||
|
||||
err := req.Send()
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
//
// It is a convenience wrapper around GetDynamicDataWithContext using the
// background context.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
	return c.GetDynamicDataWithContext(aws.BackgroundContext(), p)
}
|
||||
|
||||
// GetDynamicDataWithContext uses the path provided to request information from the EC2
|
||||
// instance metadata service for dynamic data. The content will be returned
|
||||
// as a string, or error if the request failed.
|
||||
func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetDynamicData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: sdkuri.PathJoin("/latest/dynamic", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
req.SetContext(ctx)
|
||||
|
||||
err := req.Send()
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
//
// It is a convenience wrapper around GetInstanceIdentityDocumentWithContext
// using the background context.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
	return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an
|
||||
// instance. Error is returned if the request fails or is unable to parse
|
||||
// the response.
|
||||
func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) {
|
||||
resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document")
|
||||
if err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
doc := EC2InstanceIdentityDocument{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
return doc, nil
|
||||
}
|
||||
|
||||
// IAMInfo retrieves IAM info from the metadata API.
//
// It is a convenience wrapper around IAMInfoWithContext using the
// background context.
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
	return c.IAMInfoWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// IAMInfoWithContext retrieves IAM info from the metadata API
|
||||
func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) {
|
||||
resp, err := c.GetMetadataWithContext(ctx, "iam/info")
|
||||
if err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 IAM info", err)
|
||||
}
|
||||
|
||||
info := EC2IAMInfo{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode EC2 IAM info", err)
|
||||
}
|
||||
|
||||
if info.Code != "Success" {
|
||||
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataError", errMsg, nil)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Region returns the region the instance is running in.
//
// It is a convenience wrapper around RegionWithContext using the
// background context.
func (c *EC2Metadata) Region() (string, error) {
	return c.RegionWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// RegionWithContext returns the region the instance is running in.
|
||||
func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) {
|
||||
ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// extract region from the ec2InstanceIdentityDocument
|
||||
region := ec2InstanceIdentityDocument.Region
|
||||
if len(region) == 0 {
|
||||
return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil)
|
||||
}
|
||||
// returns region
|
||||
return region, nil
|
||||
}
|
||||
|
||||
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
//
// It is a convenience wrapper around AvailableWithContext using the
// background context.
func (c *EC2Metadata) Available() bool {
	return c.AvailableWithContext(aws.BackgroundContext())
}
|
||||
|
||||
// AvailableWithContext returns if the application has access to the EC2 Metadata service.
|
||||
// Can be used to determine if application is running within an EC2 Instance and
|
||||
// the metadata service is available.
|
||||
func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool {
|
||||
if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// An EC2IAMInfo provides the shape for unmarshaling
// an IAM info from the metadata API.
type EC2IAMInfo struct {
	Code               string    // status code reported in the document; "Success" on success
	LastUpdated        time.Time // when the info was last refreshed
	InstanceProfileArn string    // ARN of the instance profile
	InstanceProfileID  string    // ID of the instance profile
}
|
||||
|
||||
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document. Field names map directly to the JSON
// keys emitted by the EC2 metadata service.
type EC2InstanceIdentityDocument struct {
	DevpayProductCodes      []string  `json:"devpayProductCodes"`
	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
	AvailabilityZone        string    `json:"availabilityZone"`
	PrivateIP               string    `json:"privateIp"`
	Version                 string    `json:"version"`
	Region                  string    `json:"region"`
	InstanceID              string    `json:"instanceId"`
	BillingProducts         []string  `json:"billingProducts"`
	InstanceType            string    `json:"instanceType"`
	AccountID               string    `json:"accountId"`
	PendingTime             time.Time `json:"pendingTime"`
	ImageID                 string    `json:"imageId"`
	KernelID                string    `json:"kernelId"`
	RamdiskID               string    `json:"ramdiskId"`
	Architecture            string    `json:"architecture"`
}
|
@ -0,0 +1,245 @@
|
||||
// Package ec2metadata provides the client for making API calls to the
|
||||
// EC2 Metadata service.
|
||||
//
|
||||
// This package's client can be disabled completely by setting the environment
|
||||
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
|
||||
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
|
||||
// be used while the environment variable is set to true, (case insensitive).
|
||||
//
|
||||
// The endpoint of the EC2 IMDS client can be configured via the environment
|
||||
// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
|
||||
// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
|
||||
package ec2metadata
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
const (
	// ServiceName is the name of the service.
	ServiceName          = "ec2metadata"
	disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"

	// Headers for Token and TTL
	ttlHeader   = "x-aws-ec2-metadata-token-ttl-seconds"
	tokenHeader = "x-aws-ec2-metadata-token"

	// Named Handler constants
	fetchTokenHandlerName          = "FetchTokenHandler"
	unmarshalMetadataHandlerName   = "unmarshalMetadataHandler"
	unmarshalTokenHandlerName      = "unmarshalTokenHandler"
	enableTokenProviderHandlerName = "enableTokenProviderHandler"

	// TTL constants
	defaultTTL = 21600 * time.Second // 6 hours, the TTL requested for IMDS tokens
	// NOTE(review): presumably the early-refresh window used by the
	// token provider (defined elsewhere) — confirm.
	ttlExpirationWindow = 30 * time.Second
)
|
||||
|
||||
// A EC2Metadata is an EC2 Metadata service Client.
// It embeds the generic SDK client, inheriting its request lifecycle
// and handler lists.
type EC2Metadata struct {
	*client.Client
}
|
||||
|
||||
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
// Example:
//
//	// Create a EC2Metadata client from just a session.
//	svc := ec2metadata.New(mySession)
//
//	// Create a EC2Metadata client with additional configuration
//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
	c := p.ClientConfig(ServiceName, cfgs...)
	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
|
||||
|
||||
// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
// client is able to communicate with the EC2 IMDS API.
//
// If an unmodified HTTP client is provided from the stdlib default, or no client
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
		// If the http client is unmodified and this feature is not disabled
		// set custom timeouts for EC2Metadata requests.
		cfg.HTTPClient = &http.Client{
			// use a shorter timeout than default because the metadata
			// service is local if it is running, and to fail faster
			// if not running on an ec2 instance.
			Timeout: 1 * time.Second,
		}
		// max number of retries on the client operation
		cfg.MaxRetries = aws.Int(2)
	}

	if u, err := url.Parse(endpoint); err == nil {
		// Remove path from the endpoint since it will be added by requests.
		// This is an artifact of the SDK adding `/latest` to the endpoint for
		// EC2 IMDS, but this is now moved to the operation definition.
		u.Path = ""
		u.RawPath = ""
		endpoint = u.String()
	}

	svc := &EC2Metadata{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: ServiceName,
				ServiceID:   ServiceName,
				Endpoint:    endpoint,
				APIVersion:  "latest",
			},
			handlers,
		),
	}

	// token provider instance; shared by the two handlers below so the
	// fetched IMDSv2 token is cached between requests.
	tp := newTokenProvider(svc, defaultTTL)

	// NamedHandler for fetching token; runs in the Sign phase so every
	// outgoing request carries the token header when one is available.
	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
		Name: fetchTokenHandlerName,
		Fn:   tp.fetchTokenHandler,
	})
	// NamedHandler for enabling token provider after a request completes.
	svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
		Name: enableTokenProviderHandlerName,
		Fn:   tp.enableTokenProviderHandler,
	})

	svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
	// Replace the default validation with the endpoint-only check; IMDS
	// operations have no user parameters to validate.
	svc.Handlers.Validate.Clear()
	svc.Handlers.Validate.PushBack(validateEndpointHandler)

	// Disable the EC2 Metadata service if the environment variable is set.
	// This short-circuits the service's functionality to always fail to send
	// requests.
	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
		// Swap out the real Send handler so no network call is ever made.
		svc.Handlers.Send.SwapNamed(request.NamedHandler{
			Name: corehandlers.SendHandler.Name,
			Fn: func(r *request.Request) {
				r.HTTPResponse = &http.Response{
					Header: http.Header{},
				}
				r.Error = awserr.New(
					request.CanceledErrorCode,
					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
					nil)
			},
		})
	}

	// Add additional options to the service config
	for _, option := range opts {
		option(svc.Client)
	}
	return svc
}
|
||||
|
||||
func httpClientZero(c *http.Client) bool {
|
||||
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
|
||||
}
|
||||
|
||||
// metadataOutput is the operation output for plain metadata requests.
type metadataOutput struct {
	// Content holds the raw response body as returned by IMDS.
	Content string
}
|
||||
|
||||
// tokenOutput is the operation output for the getToken operation.
type tokenOutput struct {
	// Token is the IMDSv2 session token from the response body.
	Token string
	// TTL is the token lifetime parsed from the TTL response header.
	TTL time.Duration
}
|
||||
|
||||
// unmarshal token handler is used to parse the response of a getToken operation
var unmarshalTokenHandler = request.NamedHandler{
	Name: unmarshalTokenHandlerName,
	Fn: func(r *request.Request) {
		defer r.HTTPResponse.Body.Close()
		// Read the whole body; the token is the entire response payload.
		var b bytes.Buffer
		if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
			r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
				"unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
			return
		}

		// The token TTL is carried in a response header, not the body.
		v := r.HTTPResponse.Header.Get(ttlHeader)
		data, ok := r.Data.(*tokenOutput)
		if !ok {
			// Caller did not supply a *tokenOutput; nothing to populate.
			return
		}

		// NOTE: Token is assigned before the TTL is validated, so on a
		// TTL parse failure the output holds the token but r.Error is set.
		data.Token = b.String()
		// TTL is in seconds
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode,
				"unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID)
			return
		}
		t := time.Duration(i) * time.Second
		data.TTL = t
	},
}
|
||||
|
||||
// unmarshalHandler copies the raw IMDS response body into the
// *metadataOutput the caller supplied via r.Data. A non-*metadataOutput
// r.Data is silently ignored.
var unmarshalHandler = request.NamedHandler{
	Name: unmarshalMetadataHandlerName,
	Fn: func(r *request.Request) {
		defer r.HTTPResponse.Body.Close()
		var b bytes.Buffer
		if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
			r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
				"unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
			return
		}

		if data, ok := r.Data.(*metadataOutput); ok {
			data.Content = b.String()
		}
	},
}
|
||||
|
||||
// unmarshalError converts a non-2xx IMDS response into an r.Error.
// The body is captured verbatim because IMDS error payloads have no
// consistent format across endpoints.
func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	var b bytes.Buffer

	if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
		r.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err),
			r.HTTPResponse.StatusCode, r.RequestID)
		return
	}

	// Response body format is not consistent between metadata endpoints.
	// Grab the error message as a string and include that as the source error
	r.Error = awserr.NewRequestFailure(
		awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil),
		r.HTTPResponse.StatusCode, r.RequestID)
}
|
||||
|
||||
// validateEndpointHandler fails the request early when the client was
// constructed without an endpoint. It replaces the SDK's default
// parameter validation for this service (see NewClient).
func validateEndpointHandler(r *request.Request) {
	if r.ClientInfo.Endpoint == "" {
		r.Error = aws.ErrMissingEndpoint
	}
}
|
@ -0,0 +1,93 @@
|
||||
package ec2metadata
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// A tokenProvider struct provides access to EC2Metadata client
// and atomic instance of a token, along with configuredTTL for it.
// tokenProvider also provides an atomic flag to disable the
// fetch token operation.
// The disabled member will use 0 as false, and 1 as true.
type tokenProvider struct {
	// client is the EC2Metadata client used to fetch new tokens.
	client *EC2Metadata
	// token atomically holds the most recent ec2Token value.
	token atomic.Value
	// configuredTTL is the TTL requested for newly fetched tokens.
	configuredTTL time.Duration
	// disabled is an atomic flag: 1 disables token fetching
	// (fall back to insecure IMDSv1 flow), 0 enables it.
	disabled uint32
}
|
||||
|
||||
// A ec2Token struct helps use of token in EC2 Metadata service ops
type ec2Token struct {
	// token is the raw IMDSv2 session token string.
	token string
	// Expiry provides SetExpiration/IsExpired bookkeeping for the token.
	credentials.Expiry
}
|
||||
|
||||
// newTokenProvider provides a pointer to a tokenProvider instance
|
||||
func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
|
||||
return &tokenProvider{client: c, configuredTTL: duration}
|
||||
}
|
||||
|
||||
// fetchTokenHandler fetches token for EC2Metadata service client by default.
// It runs in the Sign phase of every request: it reuses a cached,
// unexpired token when possible, otherwise fetches a new one, and sets
// the token header on the outgoing request. On fetch failure the request
// proceeds without a token (IMDSv1 fallback), except for 400 responses.
func (t *tokenProvider) fetchTokenHandler(r *request.Request) {

	// short-circuits to insecure data flow if tokenProvider is disabled.
	if v := atomic.LoadUint32(&t.disabled); v == 1 {
		return
	}

	// Reuse the cached token when present and not expired.
	if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
		r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
		return
	}

	output, err := t.client.getToken(r.Context(), t.configuredTTL)

	if err != nil {

		// change the disabled flag on token provider to true,
		// when error is request timeout error.
		if requestFailureError, ok := err.(awserr.RequestFailure); ok {
			switch requestFailureError.StatusCode() {
			case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
				// Token endpoint not available; disable future fetches.
				atomic.StoreUint32(&t.disabled, 1)
			case http.StatusBadRequest:
				// 400 means the token request itself was malformed;
				// surface the error rather than falling back.
				r.Error = requestFailureError
			}

			// Check if request timed out while waiting for response
			if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
				if e.Code() == request.ErrCodeRequestError {
					atomic.StoreUint32(&t.disabled, 1)
				}
			}
		}
		// For all other errors the request continues without a token.
		return
	}

	newToken := ec2Token{
		token: output.Token,
	}
	// Expire slightly early (ttlExpirationWindow) so a token is never
	// used right at its server-side expiry.
	newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
	t.token.Store(newToken)

	// Inject token header to the request.
	if ec2Token, ok := t.token.Load().(ec2Token); ok {
		r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
	}
}
|
||||
|
||||
// enableTokenProviderHandler enables the token provider
// again after a request fails with 401 Unauthorized, which indicates
// the metadata service now requires a token (IMDSv2). It clears the
// cached token and re-enables fetching so the next request obtains one.
func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
	// If the error code status is 401, we enable the token provider
	if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
		e.StatusCode() == http.StatusUnauthorized {
		t.token.Store(ec2Token{})
		atomic.StoreUint32(&t.disabled, 0)
	}
}
|
@ -0,0 +1,193 @@
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// modelDefinition is the raw top-level endpoints document: a map of
// section name (e.g. "version", "partitions") to its undecoded JSON.
type modelDefinition map[string]json.RawMessage
|
||||
|
||||
// A DecodeModelOptions are the options for how the endpoints model definition
// are decoded.
type DecodeModelOptions struct {
	// SkipCustomizations, when true, makes DecodeModel return the decoded
	// partitions verbatim, without applying SDK-specific fix-ups.
	SkipCustomizations bool
}

// Set combines all of the option functions together.
// Each function is applied to d in order; later functions may override
// earlier ones.
func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
	for i := range optFns {
		optFns[i](d)
	}
}
|
||||
|
||||
// DecodeModel unmarshals a Regions and Endpoint model definition file into
|
||||
// a endpoint Resolver. If the file format is not supported, or an error occurs
|
||||
// when unmarshaling the model an error will be returned.
|
||||
//
|
||||
// Casting the return value of this func to a EnumPartitions will
|
||||
// allow you to get a list of the partitions in the order the endpoints
|
||||
// will be resolved in.
|
||||
//
|
||||
// resolver, err := endpoints.DecodeModel(reader)
|
||||
//
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
// for _, p := range partitions {
|
||||
// // ... inspect partitions
|
||||
// }
|
||||
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
|
||||
var opts DecodeModelOptions
|
||||
opts.Set(optFns...)
|
||||
|
||||
// Get the version of the partition file to determine what
|
||||
// unmarshaling model to use.
|
||||
modelDef := modelDefinition{}
|
||||
if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
|
||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
||||
}
|
||||
|
||||
var version string
|
||||
if b, ok := modelDef["version"]; ok {
|
||||
version = string(b)
|
||||
} else {
|
||||
return nil, newDecodeModelError("endpoints version not found in model", nil)
|
||||
}
|
||||
|
||||
if version == "3" {
|
||||
return decodeV3Endpoints(modelDef, opts)
|
||||
}
|
||||
|
||||
return nil, newDecodeModelError(
|
||||
fmt.Sprintf("endpoints version %s, not supported", version), nil)
|
||||
}
|
||||
|
||||
func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
|
||||
b, ok := modelDef["partitions"]
|
||||
if !ok {
|
||||
return nil, newDecodeModelError("endpoints model missing partitions", nil)
|
||||
}
|
||||
|
||||
ps := partitions{}
|
||||
if err := json.Unmarshal(b, &ps); err != nil {
|
||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
||||
}
|
||||
|
||||
if opts.SkipCustomizations {
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// Customization
|
||||
for i := 0; i < len(ps); i++ {
|
||||
p := &ps[i]
|
||||
custRegionalS3(p)
|
||||
custRmIotDataService(p)
|
||||
custFixAppAutoscalingChina(p)
|
||||
custFixAppAutoscalingUsGov(p)
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// custRegionalS3 adds an "aws-global" S3 endpoint (s3.amazonaws.com,
// signed against us-east-1) to the standard AWS partition when the model
// does not already define one, and makes it the partition endpoint.
// Other partitions are left untouched.
func custRegionalS3(p *partition) {
	if p.ID != "aws" {
		return
	}

	service, ok := p.Services["s3"]
	if !ok {
		return
	}

	const awsGlobal = "aws-global"
	const usEast1 = "us-east-1"

	// If global endpoint already exists no customization needed.
	if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok {
		return
	}

	service.PartitionEndpoint = awsGlobal
	// Ensure us-east-1 exists so the global endpoint's credential scope
	// resolves to a real region entry.
	if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok {
		service.Endpoints[endpointKey{Region: usEast1}] = endpoint{}
	}
	service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{
		Hostname: "s3.amazonaws.com",
		CredentialScope: credentialScope{
			Region: usEast1,
		},
	}

	// service is a value copy; write the modified copy back.
	p.Services["s3"] = service
}
|
||||
|
||||
// custRmIotDataService removes the "data.iot" service entry from the
// partition; deleting a missing key is a no-op.
func custRmIotDataService(p *partition) {
	delete(p.Services, "data.iot")
}
|
||||
|
||||
// custFixAppAutoscalingChina appends the ".cn" suffix to the
// application-autoscaling default hostname in the China (aws-cn)
// partition. The fix is skipped (with a diagnostic print) if the model's
// hostname no longer matches the expected template, to avoid corrupting
// an updated model.
func custFixAppAutoscalingChina(p *partition) {
	if p.ID != "aws-cn" {
		return
	}

	const serviceName = "application-autoscaling"
	s, ok := p.Services[serviceName]
	if !ok {
		return
	}

	const expectHostname = `autoscaling.{region}.amazonaws.com`
	serviceDefault := s.Defaults[defaultKey{}]
	if e, a := expectHostname, serviceDefault.Hostname; e != a {
		fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
		return
	}
	serviceDefault.Hostname = expectHostname + ".cn"
	// serviceDefault and s are value copies; write both back.
	s.Defaults[defaultKey{}] = serviceDefault
	p.Services[serviceName] = s
}
|
||||
|
||||
// custFixAppAutoscalingUsGov fills in the credential-scope service and
// hostname for application-autoscaling in the GovCloud (aws-us-gov)
// partition. The fix only applies when both fields are empty in the
// model; otherwise it prints a diagnostic and leaves the model alone.
func custFixAppAutoscalingUsGov(p *partition) {
	if p.ID != "aws-us-gov" {
		return
	}

	const serviceName = "application-autoscaling"
	s, ok := p.Services[serviceName]
	if !ok {
		return
	}

	serviceDefault := s.Defaults[defaultKey{}]
	if a := serviceDefault.CredentialScope.Service; a != "" {
		fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
		return
	}

	if a := serviceDefault.Hostname; a != "" {
		fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
		return
	}

	serviceDefault.CredentialScope.Service = "application-autoscaling"
	serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com"

	// The defaults map may be nil when the model carried no defaults.
	if s.Defaults == nil {
		s.Defaults = make(endpointDefaults)
	}

	s.Defaults[defaultKey{}] = serviceDefault

	p.Services[serviceName] = s
}
|
||||
|
||||
// decodeModelError is the error type returned by DecodeModel and its
// helpers; it simply wraps an awsError with a dedicated type.
type decodeModelError struct {
	awsError
}
|
||||
|
||||
// newDecodeModelError builds a decodeModelError with the fixed
// "DecodeEndpointsModelError" code, the given message, and err as the
// wrapped original error (may be nil).
func newDecodeModelError(msg string, err error) decodeModelError {
	return decodeModelError{
		awsError: awserr.New("DecodeEndpointsModelError", msg, err),
	}
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,141 @@
|
||||
package endpoints
|
||||
|
||||
// Service identifiers
//
// Each constant is the endpoint-prefix string for one AWS service as it
// appears in the endpoints model (and therefore in hostnames).
//
// Deprecated: Use client package's EndpointsID value instead of these
// ServiceIDs. These IDs are not maintained, and are out of date.
const (
	A4bServiceID                          = "a4b"                          // A4b.
	AcmServiceID                          = "acm"                          // Acm.
	AcmPcaServiceID                       = "acm-pca"                      // AcmPca.
	ApiMediatailorServiceID               = "api.mediatailor"              // ApiMediatailor.
	ApiPricingServiceID                   = "api.pricing"                  // ApiPricing.
	ApiSagemakerServiceID                 = "api.sagemaker"                // ApiSagemaker.
	ApigatewayServiceID                   = "apigateway"                   // Apigateway.
	ApplicationAutoscalingServiceID       = "application-autoscaling"      // ApplicationAutoscaling.
	Appstream2ServiceID                   = "appstream2"                   // Appstream2.
	AppsyncServiceID                      = "appsync"                      // Appsync.
	AthenaServiceID                       = "athena"                       // Athena.
	AutoscalingServiceID                  = "autoscaling"                  // Autoscaling.
	AutoscalingPlansServiceID             = "autoscaling-plans"            // AutoscalingPlans.
	BatchServiceID                        = "batch"                        // Batch.
	BudgetsServiceID                      = "budgets"                      // Budgets.
	CeServiceID                           = "ce"                           // Ce.
	ChimeServiceID                        = "chime"                        // Chime.
	Cloud9ServiceID                       = "cloud9"                       // Cloud9.
	ClouddirectoryServiceID               = "clouddirectory"               // Clouddirectory.
	CloudformationServiceID               = "cloudformation"               // Cloudformation.
	CloudfrontServiceID                   = "cloudfront"                   // Cloudfront.
	CloudhsmServiceID                     = "cloudhsm"                     // Cloudhsm.
	Cloudhsmv2ServiceID                   = "cloudhsmv2"                   // Cloudhsmv2.
	CloudsearchServiceID                  = "cloudsearch"                  // Cloudsearch.
	CloudtrailServiceID                   = "cloudtrail"                   // Cloudtrail.
	CodebuildServiceID                    = "codebuild"                    // Codebuild.
	CodecommitServiceID                   = "codecommit"                   // Codecommit.
	CodedeployServiceID                   = "codedeploy"                   // Codedeploy.
	CodepipelineServiceID                 = "codepipeline"                 // Codepipeline.
	CodestarServiceID                     = "codestar"                     // Codestar.
	CognitoIdentityServiceID              = "cognito-identity"             // CognitoIdentity.
	CognitoIdpServiceID                   = "cognito-idp"                  // CognitoIdp.
	CognitoSyncServiceID                  = "cognito-sync"                 // CognitoSync.
	ComprehendServiceID                   = "comprehend"                   // Comprehend.
	ConfigServiceID                       = "config"                       // Config.
	CurServiceID                          = "cur"                          // Cur.
	DatapipelineServiceID                 = "datapipeline"                 // Datapipeline.
	DaxServiceID                          = "dax"                          // Dax.
	DevicefarmServiceID                   = "devicefarm"                   // Devicefarm.
	DirectconnectServiceID                = "directconnect"                // Directconnect.
	DiscoveryServiceID                    = "discovery"                    // Discovery.
	DmsServiceID                          = "dms"                          // Dms.
	DsServiceID                           = "ds"                           // Ds.
	DynamodbServiceID                     = "dynamodb"                     // Dynamodb.
	Ec2ServiceID                          = "ec2"                          // Ec2.
	Ec2metadataServiceID                  = "ec2metadata"                  // Ec2metadata.
	EcrServiceID                          = "ecr"                          // Ecr.
	EcsServiceID                          = "ecs"                          // Ecs.
	ElasticacheServiceID                  = "elasticache"                  // Elasticache.
	ElasticbeanstalkServiceID             = "elasticbeanstalk"             // Elasticbeanstalk.
	ElasticfilesystemServiceID            = "elasticfilesystem"            // Elasticfilesystem.
	ElasticloadbalancingServiceID         = "elasticloadbalancing"         // Elasticloadbalancing.
	ElasticmapreduceServiceID             = "elasticmapreduce"             // Elasticmapreduce.
	ElastictranscoderServiceID            = "elastictranscoder"            // Elastictranscoder.
	EmailServiceID                        = "email"                        // Email.
	EntitlementMarketplaceServiceID       = "entitlement.marketplace"      // EntitlementMarketplace.
	EsServiceID                           = "es"                           // Es.
	EventsServiceID                       = "events"                       // Events.
	FirehoseServiceID                     = "firehose"                     // Firehose.
	FmsServiceID                          = "fms"                          // Fms.
	GameliftServiceID                     = "gamelift"                     // Gamelift.
	GlacierServiceID                      = "glacier"                      // Glacier.
	GlueServiceID                         = "glue"                         // Glue.
	GreengrassServiceID                   = "greengrass"                   // Greengrass.
	GuarddutyServiceID                    = "guardduty"                    // Guardduty.
	HealthServiceID                       = "health"                       // Health.
	IamServiceID                          = "iam"                          // Iam.
	ImportexportServiceID                 = "importexport"                 // Importexport.
	InspectorServiceID                    = "inspector"                    // Inspector.
	IotServiceID                          = "iot"                          // Iot.
	IotanalyticsServiceID                 = "iotanalytics"                 // Iotanalytics.
	KinesisServiceID                      = "kinesis"                      // Kinesis.
	KinesisanalyticsServiceID             = "kinesisanalytics"             // Kinesisanalytics.
	KinesisvideoServiceID                 = "kinesisvideo"                 // Kinesisvideo.
	KmsServiceID                          = "kms"                          // Kms.
	LambdaServiceID                       = "lambda"                       // Lambda.
	LightsailServiceID                    = "lightsail"                    // Lightsail.
	LogsServiceID                         = "logs"                         // Logs.
	MachinelearningServiceID              = "machinelearning"              // Machinelearning.
	MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
	MediaconvertServiceID                 = "mediaconvert"                 // Mediaconvert.
	MedialiveServiceID                    = "medialive"                    // Medialive.
	MediapackageServiceID                 = "mediapackage"                 // Mediapackage.
	MediastoreServiceID                   = "mediastore"                   // Mediastore.
	MeteringMarketplaceServiceID          = "metering.marketplace"         // MeteringMarketplace.
	MghServiceID                          = "mgh"                          // Mgh.
	MobileanalyticsServiceID              = "mobileanalytics"              // Mobileanalytics.
	ModelsLexServiceID                    = "models.lex"                   // ModelsLex.
	MonitoringServiceID                   = "monitoring"                   // Monitoring.
	MturkRequesterServiceID               = "mturk-requester"              // MturkRequester.
	NeptuneServiceID                      = "neptune"                      // Neptune.
	OpsworksServiceID                     = "opsworks"                     // Opsworks.
	OpsworksCmServiceID                   = "opsworks-cm"                  // OpsworksCm.
	OrganizationsServiceID                = "organizations"                // Organizations.
	PinpointServiceID                     = "pinpoint"                     // Pinpoint.
	PollyServiceID                        = "polly"                        // Polly.
	RdsServiceID                          = "rds"                          // Rds.
	RedshiftServiceID                     = "redshift"                     // Redshift.
	RekognitionServiceID                  = "rekognition"                  // Rekognition.
	ResourceGroupsServiceID               = "resource-groups"              // ResourceGroups.
	Route53ServiceID                      = "route53"                      // Route53.
	Route53domainsServiceID               = "route53domains"               // Route53domains.
	RuntimeLexServiceID                   = "runtime.lex"                  // RuntimeLex.
	RuntimeSagemakerServiceID             = "runtime.sagemaker"            // RuntimeSagemaker.
	S3ServiceID                           = "s3"                           // S3.
	S3ControlServiceID                    = "s3-control"                   // S3Control.
	SagemakerServiceID                    = "api.sagemaker"                // Sagemaker.
	SdbServiceID                          = "sdb"                          // Sdb.
	SecretsmanagerServiceID               = "secretsmanager"               // Secretsmanager.
	ServerlessrepoServiceID               = "serverlessrepo"               // Serverlessrepo.
	ServicecatalogServiceID               = "servicecatalog"               // Servicecatalog.
	ServicediscoveryServiceID             = "servicediscovery"             // Servicediscovery.
	ShieldServiceID                       = "shield"                       // Shield.
	SmsServiceID                          = "sms"                          // Sms.
	SnowballServiceID                     = "snowball"                     // Snowball.
	SnsServiceID                          = "sns"                          // Sns.
	SqsServiceID                          = "sqs"                          // Sqs.
	SsmServiceID                          = "ssm"                          // Ssm.
	StatesServiceID                       = "states"                       // States.
	StoragegatewayServiceID               = "storagegateway"               // Storagegateway.
	StreamsDynamodbServiceID              = "streams.dynamodb"             // StreamsDynamodb.
	StsServiceID                          = "sts"                          // Sts.
	SupportServiceID                      = "support"                      // Support.
	SwfServiceID                          = "swf"                          // Swf.
	TaggingServiceID                      = "tagging"                      // Tagging.
	TransferServiceID                     = "transfer"                     // Transfer.
	TranslateServiceID                    = "translate"                    // Translate.
	WafServiceID                          = "waf"                          // Waf.
	WafRegionalServiceID                  = "waf-regional"                 // WafRegional.
	WorkdocsServiceID                     = "workdocs"                     // Workdocs.
	WorkmailServiceID                     = "workmail"                     // Workmail.
	WorkspacesServiceID                   = "workspaces"                   // Workspaces.
	XrayServiceID                         = "xray"                         // Xray.
)
|
@ -0,0 +1,66 @@
|
||||
// Package endpoints provides the types and functionality for defining regions
|
||||
// and endpoints, as well as querying those definitions.
|
||||
//
|
||||
// The SDK's Regions and Endpoints metadata is code generated into the endpoints
|
||||
// package, and is accessible via the DefaultResolver function. This function
|
||||
// returns a endpoint Resolver will search the metadata and build an associated
|
||||
// endpoint if one is found. The default resolver will search all partitions
|
||||
// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and
|
||||
// AWS GovCloud (US) (aws-us-gov).
|
||||
//
|
||||
// Enumerating Regions and Endpoint Metadata
|
||||
//
|
||||
// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
|
||||
// will allow you to get access to the list of underlying Partitions with the
|
||||
// Partitions method. This is helpful if you want to limit the SDK's endpoint
|
||||
// resolving to a single partition, or enumerate regions, services, and endpoints
|
||||
// in the partition.
|
||||
//
|
||||
// resolver := endpoints.DefaultResolver()
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
//
|
||||
// for _, p := range partitions {
|
||||
// fmt.Println("Regions for", p.ID())
|
||||
// for id, _ := range p.Regions() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
//
|
||||
// fmt.Println("Services for", p.ID())
|
||||
// for id, _ := range p.Services() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Using Custom Endpoints
|
||||
//
|
||||
// The endpoints package also gives you the ability to use your own logic how
|
||||
// endpoints are resolved. This is a great way to define a custom endpoint
|
||||
// for select services, without passing that logic down through your code.
|
||||
//
|
||||
// If a type implements the Resolver interface it can be used to resolve
|
||||
// endpoints. To use this with the SDK's Session and Config set the value
|
||||
// of the type to the EndpointsResolver field of aws.Config when initializing
|
||||
// the session, or service client.
|
||||
//
|
||||
// In addition the ResolverFunc is a wrapper for a func matching the signature
|
||||
// of Resolver.EndpointFor, converting it to a type that satisfies the
|
||||
// Resolver interface.
|
||||
//
|
||||
//
|
||||
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
// if service == endpoints.S3ServiceID {
|
||||
// return endpoints.ResolvedEndpoint{
|
||||
// URL: "s3.custom.endpoint.com",
|
||||
// SigningRegion: "custom-signing-region",
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
|
||||
// }
|
||||
//
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// Region: aws.String("us-west-2"),
|
||||
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
|
||||
// }))
|
||||
package endpoints
|
@ -0,0 +1,706 @@
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// A Logger is a minimalistic interface for the SDK to log messages to.
type Logger interface {
	// Log accepts fmt.Println-style arguments.
	Log(...interface{})
}
|
||||
|
||||
// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
// behavior. Its zero value (Unset) means "no explicit choice was made".
type DualStackEndpointState uint

const (
	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
	// resolution.
	DualStackEndpointStateUnset DualStackEndpointState = iota

	// DualStackEndpointStateEnabled enable dual-stack endpoint resolution for endpoints.
	DualStackEndpointStateEnabled

	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
	DualStackEndpointStateDisabled
)
|
||||
|
||||
// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
// Its zero value (Unset) means "no explicit choice was made".
type FIPSEndpointState uint

const (
	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
	FIPSEndpointStateUnset FIPSEndpointState = iota

	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
	FIPSEndpointStateEnabled

	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
	FIPSEndpointStateDisabled
)
|
||||
|
||||
// Options provide the configuration needed to direct how the
// endpoints will be resolved.
type Options struct {
	// DisableSSL forces the endpoint to be resolved as HTTP.
	// instead of HTTPS if the service supports it.
	DisableSSL bool

	// Sets the resolver to resolve the endpoint as a dualstack endpoint
	// for the service. If dualstack support for a service is not known and
	// StrictMatching is not enabled a dualstack endpoint for the service will
	// be returned. This endpoint may not be valid. If StrictMatching is
	// enabled only services that are known to support dualstack will return
	// dualstack endpoints.
	//
	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
	// precedence then this option.
	UseDualStack bool

	// Sets the resolver to resolve a dual-stack endpoint for the service.
	UseDualStackEndpoint DualStackEndpointState

	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
	UseFIPSEndpoint FIPSEndpointState

	// Enables strict matching of services and regions resolved endpoints.
	// If the partition doesn't enumerate the exact service and region an
	// error will be returned. This option will prevent returning endpoints
	// that look valid, but may not resolve to any real endpoint.
	StrictMatching bool

	// Enables resolving a service endpoint based on the region provided if the
	// service does not exist. The service endpoint ID will be used as the service
	// domain name prefix. By default the endpoint resolver requires the service
	// to be known when resolving endpoints.
	//
	// If resolving an endpoint on the partition list the provided region will
	// be used to determine which partition's domain name pattern to the service
	// endpoint ID with. If both the service and region are unknown and resolving
	// the endpoint on partition list an UnknownEndpointError error will be returned.
	//
	// If resolving and endpoint on a partition specific resolver that partition's
	// domain name pattern will be used with the service endpoint ID. If both
	// region and service do not exist when resolving an endpoint on a specific
	// partition the partition's domain pattern will be used to combine the
	// endpoint and region together.
	//
	// This option is ignored if StrictMatching is enabled.
	ResolveUnknownService bool

	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
	EC2MetadataEndpointMode EC2IMDSEndpointModeState

	// STS Regional Endpoint flag helps with resolving the STS endpoint
	STSRegionalEndpoint STSRegionalEndpoint

	// S3 Regional Endpoint flag helps with resolving the S3 endpoint
	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint

	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
	// over the region name passed to the ResolveEndpoint call.
	ResolvedRegion string

	// Logger is the logger that will be used to log messages.
	Logger Logger

	// Determines whether logging of deprecated endpoints usage is enabled.
	LogDeprecated bool
}
|
||||
|
||||
func (o Options) getEndpointVariant(service string) (v endpointVariant) {
|
||||
const s3 = "s3"
|
||||
const s3Control = "s3-control"
|
||||
|
||||
if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
|
||||
((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
|
||||
v |= dualStackVariant
|
||||
}
|
||||
if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
|
||||
v |= fipsVariant
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
type EC2IMDSEndpointModeState uint

// Enumeration values for EC2IMDSEndpointModeState
const (
	EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota
	EC2IMDSEndpointModeStateIPv4
	EC2IMDSEndpointModeStateIPv6
)

// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset
func (e *EC2IMDSEndpointModeState) SetFromString(v string) error {
	// Surrounding whitespace never affects the match.
	trimmed := strings.TrimSpace(v)

	if len(trimmed) == 0 {
		*e = EC2IMDSEndpointModeStateUnset
		return nil
	}
	// Matching is case-insensitive.
	if strings.EqualFold(trimmed, "IPv6") {
		*e = EC2IMDSEndpointModeStateIPv6
		return nil
	}
	if strings.EqualFold(trimmed, "IPv4") {
		*e = EC2IMDSEndpointModeStateIPv4
		return nil
	}
	return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
}
|
||||
|
||||
// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
// options.
type STSRegionalEndpoint int

const (
	// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
	UnsetSTSEndpoint STSRegionalEndpoint = iota

	// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
	// to use legacy endpoints.
	LegacySTSEndpoint

	// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
	// to use regional endpoints.
	RegionalSTSEndpoint
)

// String returns the configuration string for the enum value: "" for unset,
// "legacy", "regional", or "unknown" for any unrecognized value.
func (e STSRegionalEndpoint) String() string {
	switch e {
	case UnsetSTSEndpoint:
		return ""
	case LegacySTSEndpoint:
		return "legacy"
	case RegionalSTSEndpoint:
		return "regional"
	}
	return "unknown"
}

// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based on the
// input string provided in env config or shared config by the user.
//
// `legacy` and `regional` are the only case-insensitive valid strings for
// resolving the STS regional endpoint flag.
func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
	if strings.EqualFold(s, "legacy") {
		return LegacySTSEndpoint, nil
	}
	if strings.EqualFold(s, "regional") {
		return RegionalSTSEndpoint, nil
	}
	return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
}
|
||||
|
||||
// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
// Regional Endpoint options.
type S3UsEast1RegionalEndpoint int

const (
	// UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
	// specified.
	UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota

	// LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
	// specified to use legacy endpoints.
	LegacyS3UsEast1Endpoint

	// RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
	// specified to use regional endpoints.
	RegionalS3UsEast1Endpoint
)

// String returns the configuration string for the enum value: "" for unset,
// "legacy", "regional", or "unknown" for any unrecognized value.
func (e S3UsEast1RegionalEndpoint) String() string {
	switch e {
	case UnsetS3UsEast1Endpoint:
		return ""
	case LegacyS3UsEast1Endpoint:
		return "legacy"
	case RegionalS3UsEast1Endpoint:
		return "regional"
	}
	return "unknown"
}

// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag
// based on the input string provided in env config or shared config by the user.
//
// `legacy` and `regional` are the only case-insensitive valid strings for
// resolving the S3 regional endpoint flag.
func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
	if strings.EqualFold(s, "legacy") {
		return LegacyS3UsEast1Endpoint, nil
	}
	if strings.EqualFold(s, "regional") {
		return RegionalS3UsEast1Endpoint, nil
	}
	return UnsetS3UsEast1Endpoint,
		fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
}
|
||||
|
||||
// Set combines all of the option functions together, applying each one to the
// receiver in the order given.
func (o *Options) Set(optFns ...func(*Options)) {
	for _, fn := range optFns {
		fn(o)
	}
}

// DisableSSLOption sets the DisableSSL options. Can be used as a functional
// option when resolving endpoints.
func DisableSSLOption(o *Options) {
	o.DisableSSL = true
}

// UseDualStackOption sets the UseDualStack option. Can be used as a functional
// option when resolving endpoints.
//
// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
// When DualStackEndpointState is set to a non-zero value it takes higher precedence then this option.
func UseDualStackOption(o *Options) {
	o.UseDualStack = true
}

// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
// option when resolving endpoints.
func UseDualStackEndpointOption(o *Options) {
	o.UseDualStackEndpoint = DualStackEndpointStateEnabled
}

// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
// option when resolving endpoints.
func UseFIPSEndpointOption(o *Options) {
	o.UseFIPSEndpoint = FIPSEndpointStateEnabled
}

// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
// option when resolving endpoints.
func StrictMatchingOption(o *Options) {
	o.StrictMatching = true
}

// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
// as a functional option when resolving endpoints.
func ResolveUnknownServiceOption(o *Options) {
	o.ResolveUnknownService = true
}

// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
// STS endpoint to their regional endpoint, instead of the global endpoint.
func STSRegionalEndpointOption(o *Options) {
	o.STSRegionalEndpoint = RegionalSTSEndpoint
}
|
||||
|
||||
// A Resolver provides the interface for functionality to resolve endpoints.
// The built-in Partition and DefaultResolver return values satisfy this interface.
type Resolver interface {
	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
}

// ResolverFunc is a helper utility that wraps a function so it satisfies the
// Resolver interface. This is useful when you want to add additional endpoint
// resolving logic, or stub out specific endpoints with custom values.
type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)

// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
	return fn(service, region, opts...)
}
|
||||
|
||||
// schemeRE matches a URL scheme prefix such as "https://".
var schemeRE = regexp.MustCompile("^([^:]+)://")

// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL that has no
// scheme of its own. When disableSSL is true "http" is used instead of the
// default "https".
//
// Endpoints that already contain a scheme are returned unmodified, regardless
// of disableSSL.
func AddScheme(endpoint string, disableSSL bool) string {
	if schemeRE.MatchString(endpoint) {
		return endpoint
	}

	scheme := "https"
	if disableSSL {
		scheme = "http"
	}
	return fmt.Sprintf("%s://%s", scheme, endpoint)
}
|
||||
|
||||
// EnumPartitions a provides a way to retrieve the underlying partitions that
|
||||
// make up the SDK's default Resolver, or any resolver decoded from a model
|
||||
// file.
|
||||
//
|
||||
// Use this interface with DefaultResolver and DecodeModels to get the list of
|
||||
// Partitions.
|
||||
type EnumPartitions interface {
|
||||
Partitions() []Partition
|
||||
}
|
||||
|
||||
// RegionsForService returns a map of regions for the partition and service.
|
||||
// If either the partition or service does not exist false will be returned
|
||||
// as the second parameter.
|
||||
//
|
||||
// This example shows how to get the regions for DynamoDB in the AWS partition.
|
||||
// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
|
||||
//
|
||||
// This is equivalent to using the partition directly.
|
||||
// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
|
||||
func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
|
||||
for _, p := range ps {
|
||||
if p.ID() != partitionID {
|
||||
continue
|
||||
}
|
||||
if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
|
||||
break
|
||||
}
|
||||
|
||||
s := Service{
|
||||
id: serviceID,
|
||||
p: p.p,
|
||||
}
|
||||
return s.Regions(), true
|
||||
}
|
||||
|
||||
return map[string]Region{}, false
|
||||
}
|
||||
|
||||
// PartitionForRegion returns the first partition which includes the region
|
||||
// passed in. This includes both known regions and regions which match
|
||||
// a pattern supported by the partition which may include regions that are
|
||||
// not explicitly known by the partition. Use the Regions method of the
|
||||
// returned Partition if explicit support is needed.
|
||||
func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
|
||||
for _, p := range ps {
|
||||
if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
|
||||
return Partition{}, false
|
||||
}
|
||||
|
||||
// A Partition provides the ability to enumerate the partition's regions
|
||||
// and services.
|
||||
type Partition struct {
|
||||
id, dnsSuffix string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// DNSSuffix returns the base domain name of the partition.
|
||||
func (p Partition) DNSSuffix() string { return p.dnsSuffix }
|
||||
|
||||
// ID returns the identifier of the partition.
|
||||
func (p Partition) ID() string { return p.id }
|
||||
|
||||
// EndpointFor attempts to resolve the endpoint based on service and region.
|
||||
// See Options for information on configuring how the endpoint is resolved.
|
||||
//
|
||||
// If the service cannot be found in the metadata the UnknownServiceError
|
||||
// error will be returned. This validation will occur regardless if
|
||||
// StrictMatching is enabled. To enable resolving unknown services set the
|
||||
// "ResolveUnknownService" option to true. When StrictMatching is disabled
|
||||
// this option allows the partition resolver to resolve a endpoint based on
|
||||
// the service endpoint ID provided.
|
||||
//
|
||||
// When resolving endpoints you can choose to enable StrictMatching. This will
|
||||
// require the provided service and region to be known by the partition.
|
||||
// If the endpoint cannot be strictly resolved an error will be returned. This
|
||||
// mode is useful to ensure the endpoint resolved is valid. Without
|
||||
// StrictMatching enabled the endpoint returned may look valid but may not work.
|
||||
// StrictMatching requires the SDK to be updated if you want to take advantage
|
||||
// of new regions and services expansions.
|
||||
//
|
||||
// Errors that can be returned.
|
||||
// * UnknownServiceError
|
||||
// * UnknownEndpointError
|
||||
func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return p.p.EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
// Regions returns a map of Regions indexed by their ID. This is useful for
|
||||
// enumerating over the regions in a partition.
|
||||
func (p Partition) Regions() map[string]Region {
|
||||
rs := make(map[string]Region, len(p.p.Regions))
|
||||
for id, r := range p.p.Regions {
|
||||
rs[id] = Region{
|
||||
id: id,
|
||||
desc: r.Description,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Services returns a map of Service indexed by their ID. This is useful for
|
||||
// enumerating over the services in a partition.
|
||||
func (p Partition) Services() map[string]Service {
|
||||
ss := make(map[string]Service, len(p.p.Services))
|
||||
|
||||
for id := range p.p.Services {
|
||||
ss[id] = Service{
|
||||
id: id,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
// Since we have removed the customization that injected this into the model
|
||||
// we still need to pretend that this is a modeled service.
|
||||
if _, ok := ss[Ec2metadataServiceID]; !ok {
|
||||
ss[Ec2metadataServiceID] = Service{
|
||||
id: Ec2metadataServiceID,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
return ss
|
||||
}
|
||||
|
||||
// A Region provides information about a region, and ability to resolve an
|
||||
// endpoint from the context of a region, given a service.
|
||||
type Region struct {
|
||||
id, desc string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the region's identifier.
|
||||
func (r Region) ID() string { return r.id }
|
||||
|
||||
// Description returns the region's description. The region description
|
||||
// is free text, it can be empty, and it may change between SDK releases.
|
||||
func (r Region) Description() string { return r.desc }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of the region given
|
||||
// a service. See Partition.EndpointFor for usage and errors that can be returned.
|
||||
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return r.p.EndpointFor(service, r.id, opts...)
|
||||
}
|
||||
|
||||
// Services returns a list of all services that are known to be in this region.
|
||||
func (r Region) Services() map[string]Service {
|
||||
ss := map[string]Service{}
|
||||
for id, s := range r.p.Services {
|
||||
if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok {
|
||||
ss[id] = Service{
|
||||
id: id,
|
||||
p: r.p,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ss
|
||||
}
|
||||
|
||||
// A Service provides information about a service, and ability to resolve an
|
||||
// endpoint from the context of a service, given a region.
|
||||
type Service struct {
|
||||
id string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the identifier for the service.
|
||||
func (s Service) ID() string { return s.id }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of a service given
|
||||
// a region. See Partition.EndpointFor for usage and errors that can be returned.
|
||||
func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return s.p.EndpointFor(s.id, region, opts...)
|
||||
}
|
||||
|
||||
// Regions returns a map of Regions that the service is present in.
|
||||
//
|
||||
// A region is the AWS region the service exists in. Whereas a Endpoint is
|
||||
// an URL that can be resolved to a instance of a service.
|
||||
func (s Service) Regions() map[string]Region {
|
||||
rs := map[string]Region{}
|
||||
|
||||
service, ok := s.p.Services[s.id]
|
||||
|
||||
// Since ec2metadata customization has been removed we need to check
|
||||
// if it was defined in non-standard endpoints.json file. If it's not
|
||||
// then we can return the empty map as there is no regional-endpoints for IMDS.
|
||||
// Otherwise, we iterate need to iterate the non-standard model.
|
||||
if s.id == Ec2metadataServiceID && !ok {
|
||||
return rs
|
||||
}
|
||||
|
||||
for id := range service.Endpoints {
|
||||
if id.Variant != 0 {
|
||||
continue
|
||||
}
|
||||
if r, ok := s.p.Regions[id.Region]; ok {
|
||||
rs[id.Region] = Region{
|
||||
id: id.Region,
|
||||
desc: r.Description,
|
||||
p: s.p,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Endpoints returns a map of Endpoints indexed by their ID for all known
|
||||
// endpoints for a service.
|
||||
//
|
||||
// A region is the AWS region the service exists in. Whereas a Endpoint is
|
||||
// an URL that can be resolved to a instance of a service.
|
||||
func (s Service) Endpoints() map[string]Endpoint {
|
||||
es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
|
||||
for id := range s.p.Services[s.id].Endpoints {
|
||||
if id.Variant != 0 {
|
||||
continue
|
||||
}
|
||||
es[id.Region] = Endpoint{
|
||||
id: id.Region,
|
||||
serviceID: s.id,
|
||||
p: s.p,
|
||||
}
|
||||
}
|
||||
|
||||
return es
|
||||
}
|
||||
|
||||
// A Endpoint provides information about endpoints, and provides the ability
// to resolve that endpoint for the service, and the region the endpoint
// represents.
type Endpoint struct {
	id        string
	serviceID string
	p         *partition
}

// ID returns the identifier for an endpoint (its region ID).
func (e Endpoint) ID() string { return e.id }

// ServiceID returns the identifier of the service the endpoint belongs to.
func (e Endpoint) ServiceID() string { return e.serviceID }

// ResolveEndpoint resolves an endpoint from the context of a service and
// region the endpoint represents. See Partition.EndpointFor for usage and
// errors that can be returned.
func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
	return e.p.EndpointFor(e.serviceID, e.id, opts...)
}

// A ResolvedEndpoint is an endpoint that has been resolved based on a partition
// service, and region.
type ResolvedEndpoint struct {
	// The endpoint URL
	URL string

	// The endpoint partition
	PartitionID string

	// The region that should be used for signing requests.
	SigningRegion string

	// The service name that should be used for signing requests.
	SigningName string

	// States that the signing name for this endpoint was derived from metadata
	// passed in, but was not explicitly modeled.
	SigningNameDerived bool

	// The signing method that should be used for signing requests.
	SigningMethod string
}

// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError awserr.Error

// A EndpointNotFoundError is returned when in StrictMatching mode, and the
// endpoint for the service and region cannot be found in any of the partitions.
type EndpointNotFoundError struct {
	awsError
	Partition string
	Service   string
	Region    string
}
|
||||
|
||||
// A UnknownServiceError is returned when the service does not resolve to an
// endpoint. Includes a list of all known services for the partition. Returned
// when a partition does not support the service.
type UnknownServiceError struct {
	awsError
	Partition string
	Service   string
	Known     []string
}

// NewUnknownServiceError builds and returns UnknownServiceError.
func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
	return UnknownServiceError{
		awsError: awserr.New("UnknownServiceError",
			"could not resolve endpoint for unknown service", nil),
		Partition: p,
		Service:   s,
		Known:     known,
	}
}

// Error returns the string representation of the error, including the
// partition and service, and the list of known services when non-empty.
func (e UnknownServiceError) Error() string {
	extra := fmt.Sprintf("partition: %q, service: %q",
		e.Partition, e.Service)
	if len(e.Known) > 0 {
		extra += fmt.Sprintf(", known: %v", e.Known)
	}
	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}

// String returns the string representation of the error.
func (e UnknownServiceError) String() string {
	return e.Error()
}
|
||||
|
||||
// A UnknownEndpointError is returned when in StrictMatching mode and the
// service is valid, but the region does not resolve to an endpoint. Includes
// a list of all known endpoints for the service.
type UnknownEndpointError struct {
	awsError
	Partition string
	Service   string
	Region    string
	Known     []string
}

// NewUnknownEndpointError builds and returns UnknownEndpointError.
func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
	return UnknownEndpointError{
		awsError: awserr.New("UnknownEndpointError",
			"could not resolve endpoint", nil),
		Partition: p,
		Service:   s,
		Region:    r,
		Known:     known,
	}
}

// Error returns the string representation of the error, including the
// partition, service, and region, and the known endpoints when non-empty.
func (e UnknownEndpointError) Error() string {
	extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
		e.Partition, e.Service, e.Region)
	if len(e.Known) > 0 {
		extra += fmt.Sprintf(", known: %v", e.Known)
	}
	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}

// String returns the string representation of the error.
func (e UnknownEndpointError) String() string {
	return e.Error()
}
|
@ -0,0 +1,24 @@
|
||||
package endpoints
|
||||
|
||||
// legacyGlobalRegions lists, per service, the regions that historically
// resolved to the partition-global endpoint. Membership here is consumed by
// isLegacyGlobalRegion to substitute the "aws-global" pseudo-region when
// regional endpoints were not explicitly requested.
var legacyGlobalRegions = map[string]map[string]struct{}{
	"sts": {
		"ap-northeast-1": {},
		"ap-south-1":     {},
		"ap-southeast-1": {},
		"ap-southeast-2": {},
		"ca-central-1":   {},
		"eu-central-1":   {},
		"eu-north-1":     {},
		"eu-west-1":      {},
		"eu-west-2":      {},
		"eu-west-3":      {},
		"sa-east-1":      {},
		"us-east-1":      {},
		"us-east-2":      {},
		"us-west-1":      {},
		"us-west-2":      {},
	},
	"s3": {
		"us-east-1": {},
	},
}
|
@ -0,0 +1,594 @@
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// Well-known EC2 Instance Metadata Service base URLs for the IPv6 and
	// IPv4 endpoint modes.
	ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
	ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
)

// dnsSuffixTemplateKey is the hostname-template placeholder that is replaced
// with a partition's DNS suffix during endpoint resolution.
const dnsSuffixTemplateKey = "{dnsSuffix}"

// defaultKey is a compound map key of a variant and other values.
type defaultKey struct {
	Variant        endpointVariant
	ServiceVariant serviceVariant
}

// endpointKey is a compound map key of a region and associated variant value.
type endpointKey struct {
	Region  string
	Variant endpointVariant
}

// endpointVariant is a bit field to describe the endpoints attributes.
type endpointVariant uint64

// serviceVariant is a bit field to describe the service endpoint attributes.
type serviceVariant uint64

const (
	// fipsVariant indicates that the endpoint is FIPS capable.
	fipsVariant endpointVariant = 1 << (64 - 1 - iota)

	// dualStackVariant indicates that the endpoint is DualStack capable.
	dualStackVariant
)

// regionValidationRegex matches syntactically valid region identifiers:
// alphanumeric characters, optionally separated by interior hyphens.
var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
|
||||
|
||||
type partitions []partition
|
||||
|
||||
func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
var opt Options
|
||||
opt.Set(opts...)
|
||||
|
||||
if len(opt.ResolvedRegion) > 0 {
|
||||
region = opt.ResolvedRegion
|
||||
}
|
||||
|
||||
for i := 0; i < len(ps); i++ {
|
||||
if !ps[i].canResolveEndpoint(service, region, opt) {
|
||||
continue
|
||||
}
|
||||
|
||||
return ps[i].EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
// If loose matching fallback to first partition format to use
|
||||
// when resolving the endpoint.
|
||||
if !opt.StrictMatching && len(ps) > 0 {
|
||||
return ps[0].EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
|
||||
}
|
||||
|
||||
// Partitions satisfies the EnumPartitions interface and returns a list
|
||||
// of Partitions representing each partition represented in the SDK's
|
||||
// endpoints model.
|
||||
func (ps partitions) Partitions() []Partition {
|
||||
parts := make([]Partition, 0, len(ps))
|
||||
for i := 0; i < len(ps); i++ {
|
||||
parts = append(parts, ps[i].Partition())
|
||||
}
|
||||
|
||||
return parts
|
||||
}
|
||||
|
||||
// endpointWithVariants is the JSON shape of an endpoint that may also carry
// tagged variants (e.g. fips, dualstack).
type endpointWithVariants struct {
	endpoint
	Variants []endpointWithTags `json:"variants"`
}

// endpointWithTags is a variant endpoint together with the tags that
// identify which variant it is.
type endpointWithTags struct {
	endpoint
	Tags []string `json:"tags"`
}

// endpointDefaults maps a defaultKey (variant bit field) to the default
// endpoint values for that variant.
type endpointDefaults map[defaultKey]endpoint
|
||||
|
||||
// UnmarshalJSON decodes a "defaults" JSON document into the map, storing the
// untagged endpoint under the zero variant key and expanding each modeled
// variant (fips/dualstack) into its own defaultKey entry.
func (p *endpointDefaults) UnmarshalJSON(data []byte) error {
	if *p == nil {
		*p = make(endpointDefaults)
	}

	var e endpointWithVariants
	if err := json.Unmarshal(data, &e); err != nil {
		return err
	}

	// The untagged endpoint is keyed by the zero variant.
	(*p)[defaultKey{Variant: 0}] = e.endpoint

	// Clear Hostname and DNSSuffix before merging the base endpoint into each
	// variant, so variants do not inherit these two fields from the base
	// (mergeIn is defined elsewhere; presumably it skips empty fields).
	e.Hostname = ""
	e.DNSSuffix = ""

	for _, variant := range e.Variants {
		endpointVariant, unknown := parseVariantTags(variant.Tags)
		if unknown {
			// Skip variants carrying tags this SDK version does not model.
			continue
		}

		var ve endpoint
		ve.mergeIn(e.endpoint)
		ve.mergeIn(variant.endpoint)

		(*p)[defaultKey{Variant: endpointVariant}] = ve
	}

	return nil
}
|
||||
|
||||
func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) {
|
||||
if len(tags) == 0 {
|
||||
unknown = true
|
||||
return
|
||||
}
|
||||
|
||||
for _, tag := range tags {
|
||||
switch {
|
||||
case strings.EqualFold("fips", tag):
|
||||
ev |= fipsVariant
|
||||
case strings.EqualFold("dualstack", tag):
|
||||
ev |= dualStackVariant
|
||||
default:
|
||||
unknown = true
|
||||
}
|
||||
}
|
||||
return ev, unknown
|
||||
}
|
||||
|
||||
// partition is the JSON model of a single partition (e.g. "aws", "aws-cn")
// within the endpoints document.
type partition struct {
	ID          string           `json:"partition"`
	Name        string           `json:"partitionName"`
	DNSSuffix   string           `json:"dnsSuffix"`
	RegionRegex regionRegex      `json:"regionRegex"`
	Defaults    endpointDefaults `json:"defaults"`
	Regions     regions          `json:"regions"`
	Services    services         `json:"services"`
}

// Partition returns the exported Partition view of this model. The receiver
// is a value, so the returned Partition points at a snapshot copy of p.
func (p partition) Partition() Partition {
	return Partition{
		dnsSuffix: p.DNSSuffix,
		id:        p.ID,
		p:         &p,
	}
}
|
||||
|
||||
func (p partition) canResolveEndpoint(service, region string, options Options) bool {
|
||||
s, hasService := p.Services[service]
|
||||
_, hasEndpoint := s.Endpoints[endpointKey{
|
||||
Region: region,
|
||||
Variant: options.getEndpointVariant(service),
|
||||
}]
|
||||
|
||||
if hasEndpoint && hasService {
|
||||
return true
|
||||
}
|
||||
|
||||
if options.StrictMatching {
|
||||
return false
|
||||
}
|
||||
|
||||
return p.RegionRegex.MatchString(region)
|
||||
}
|
||||
|
||||
// allowLegacyEmptyRegion reports whether the named service may, for backwards
// compatibility, be resolved with an empty region (falling back to the
// service's partition endpoint).
//
// Uses a switch over the fixed service list rather than allocating a
// membership map on every call; the accepted set is unchanged.
func allowLegacyEmptyRegion(service string) bool {
	switch service {
	case "budgets",
		"ce",
		"chime",
		"cloudfront",
		"ec2metadata",
		"iam",
		"importexport",
		"organizations",
		"route53",
		"sts",
		"support",
		"waf":
		return true
	}

	return false
}
|
||||
|
||||
// EndpointFor resolves the endpoint for the given service and region within
// this partition, applying variant (fips/dualstack), legacy-global, and
// empty-region compatibility rules. Returns UnknownServiceError or
// UnknownEndpointError when resolution is not possible.
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
	var opt Options
	opt.Set(opts...)

	// An explicitly resolved region takes priority over the caller's region.
	if len(opt.ResolvedRegion) > 0 {
		region = opt.ResolvedRegion
	}

	s, hasService := p.Services[service]

	// IMDS (ec2metadata) may be absent from the model; synthesize its
	// well-known endpoint instead of failing.
	if service == Ec2metadataServiceID && !hasService {
		endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode)
		return endpoint, nil
	}

	if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
		// Only return error if the resolver will not fallback to creating
		// endpoint based on service endpoint ID passed in.
		return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
	}

	// Legacy services may be resolved with an empty region via the service's
	// partition-wide endpoint.
	if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
		region = s.PartitionEndpoint
	}

	// Some service/region pairs historically resolve to the global endpoint.
	if r, ok := isLegacyGlobalRegion(service, region, opt); ok {
		region = r
	}

	variant := opt.getEndpointVariant(service)

	endpoints := s.Endpoints

	serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}]
	// If we searched for a variant which may have no explicit service defaults,
	// then we need to inherit the standard service defaults except the hostname and dnsSuffix
	if variant != 0 && !hasServiceDefault {
		serviceDefaults = s.Defaults[defaultKey{}]
		serviceDefaults.Hostname = ""
		serviceDefaults.DNSSuffix = ""
	}

	partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}]

	var dnsSuffix string
	if len(serviceDefaults.DNSSuffix) > 0 {
		dnsSuffix = serviceDefaults.DNSSuffix
	} else if variant == 0 {
		// For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for
		// a non-variant endpoint then we need to set the dnsSuffix.
		dnsSuffix = p.DNSSuffix
	}

	noDefaults := !hasServiceDefault && !hasPartitionDefault

	e, hasEndpoint := s.endpointForRegion(region, endpoints, variant)
	if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) {
		return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant))
	}

	// Partition and service defaults are handed to resolve (defined
	// elsewhere), which merges them into the final endpoint.
	defs := []endpoint{partitionDefaults, serviceDefaults}

	return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt)
}
|
||||
|
||||
func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint {
|
||||
switch mode {
|
||||
case EC2IMDSEndpointModeStateIPv6:
|
||||
return ResolvedEndpoint{
|
||||
URL: ec2MetadataEndpointIPv6,
|
||||
PartitionID: partitionID,
|
||||
SigningRegion: "aws-global",
|
||||
SigningName: service,
|
||||
SigningNameDerived: true,
|
||||
SigningMethod: "v4",
|
||||
}
|
||||
case EC2IMDSEndpointModeStateIPv4:
|
||||
fallthrough
|
||||
default:
|
||||
return ResolvedEndpoint{
|
||||
URL: ec2MetadataEndpointIPv4,
|
||||
PartitionID: partitionID,
|
||||
SigningRegion: "aws-global",
|
||||
SigningName: service,
|
||||
SigningNameDerived: true,
|
||||
SigningMethod: "v4",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) {
|
||||
if opt.getEndpointVariant(service) != 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
const (
|
||||
sts = "sts"
|
||||
s3 = "s3"
|
||||
awsGlobal = "aws-global"
|
||||
)
|
||||
|
||||
switch {
|
||||
case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint:
|
||||
return region, false
|
||||
case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint:
|
||||
return region, false
|
||||
default:
|
||||
if _, ok := legacyGlobalRegions[service][region]; ok {
|
||||
return awsGlobal, true
|
||||
}
|
||||
}
|
||||
|
||||
return region, false
|
||||
}
|
||||
|
||||
func serviceList(ss services) []string {
|
||||
list := make([]string, 0, len(ss))
|
||||
for k := range ss {
|
||||
list = append(list, k)
|
||||
}
|
||||
return list
|
||||
}
|
||||
func endpointList(es serviceEndpoints, variant endpointVariant) []string {
|
||||
list := make([]string, 0, len(es))
|
||||
for k := range es {
|
||||
if k.Variant != variant {
|
||||
continue
|
||||
}
|
||||
list = append(list, k.Region)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// A regionRegex wraps a compiled regular expression used to match region
// identifiers against a partition's region pattern.
type regionRegex struct {
	*regexp.Regexp
}

// UnmarshalJSON decodes a JSON string holding a regular expression and
// compiles it into the embedded *regexp.Regexp.
func (rr *regionRegex) UnmarshalJSON(b []byte) error {
	// The raw JSON value is a quoted string; unquote it to recover the
	// regex source text.
	pattern, err := strconv.Unquote(string(b))
	if err != nil {
		return fmt.Errorf("unable to strip quotes from regex, %v", err)
	}

	compiled, err := regexp.Compile(pattern)
	if err != nil {
		return fmt.Errorf("unable to unmarshal region regex, %v", err)
	}
	rr.Regexp = compiled
	return nil
}
|
||||
|
||||
// regions maps a region identifier (e.g. "us-east-1") to its metadata.
type regions map[string]region

// region is the model metadata for a single region.
type region struct {
	Description string `json:"description"`
}
|
||||
|
||||
// services maps a service identifier to that service's endpoint model.
type services map[string]service

// service models a single service's endpoint configuration within a
// partition.
type service struct {
	// PartitionEndpoint is the endpoint key used when the service is not
	// regionalized (see endpointForRegion).
	PartitionEndpoint string `json:"partitionEndpoint"`
	// IsRegionalized indicates whether the service exposes per-region
	// endpoints; a tri-state so an absent value differs from false.
	IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
	// Defaults are endpoint values merged into each resolved endpoint.
	Defaults endpointDefaults `json:"defaults"`
	// Endpoints holds the service's endpoints keyed by region and variant.
	Endpoints serviceEndpoints `json:"endpoints"`
}
|
||||
|
||||
func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) {
|
||||
if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok {
|
||||
return e, true
|
||||
}
|
||||
|
||||
if s.IsRegionalized == boxedFalse {
|
||||
return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint
|
||||
}
|
||||
|
||||
// Unable to find any matching endpoint, return
|
||||
// blank that will be used for generic endpoint creation.
|
||||
return endpoint{}, false
|
||||
}
|
||||
|
||||
// serviceEndpoints holds a service's endpoints keyed by region + variant.
type serviceEndpoints map[endpointKey]endpoint

// UnmarshalJSON decodes the per-region endpoint JSON into the map. For each
// region the base endpoint is stored under the plain region key, then each
// recognized variant (e.g. FIPS, dual-stack) is stored under the region +
// variant key as the base endpoint overlaid with the variant's fields.
func (s *serviceEndpoints) UnmarshalJSON(data []byte) error {
	if *s == nil {
		*s = make(serviceEndpoints)
	}

	var regionToEndpoint map[string]endpointWithVariants

	if err := json.Unmarshal(data, &regionToEndpoint); err != nil {
		return err
	}

	for region, e := range regionToEndpoint {
		// Store the base (variant-less) endpoint first.
		(*s)[endpointKey{Region: region}] = e.endpoint

		// Clear the base hostname/suffix on the loop copy so they do not
		// leak into variant entries below; variants must supply their own
		// or inherit via mergeIn's set-only-if-non-empty semantics.
		e.Hostname = ""
		e.DNSSuffix = ""

		for _, variant := range e.Variants {
			endpointVariant, unknown := parseVariantTags(variant.Tags)
			if unknown {
				// Skip variants with tags this SDK does not model.
				continue
			}

			// Variant endpoint = base fields overlaid by variant fields.
			var ve endpoint
			ve.mergeIn(e.endpoint)
			ve.mergeIn(variant.endpoint)

			(*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve
		}
	}

	return nil
}
|
||||
|
||||
// endpoint models a single endpoint entry from the endpoints JSON document.
// Hostname may contain the template variables "{service}", "{region}", and
// the DNS-suffix variable, which are substituted during resolve.
type endpoint struct {
	Hostname        string          `json:"hostname"`
	Protocols       []string        `json:"protocols"`
	CredentialScope credentialScope `json:"credentialScope"`

	// DNSSuffix overrides the partition's DNS suffix when non-empty.
	DNSSuffix string `json:"dnsSuffix"`

	// Signature Version not used
	SignatureVersions []string `json:"signatureVersions"`

	// SSLCommonName not used.
	SSLCommonName string `json:"sslCommonName"`

	// Deprecated marks the endpoint as deprecated; resolve may log a
	// warning when it is set to boxedTrue.
	Deprecated boxedBool `json:"deprecated"`
}
|
||||
|
||||
// isZero returns whether the endpoint structure is an empty (zero) value.
|
||||
func (e endpoint) isZero() bool {
|
||||
switch {
|
||||
case len(e.Hostname) != 0:
|
||||
return false
|
||||
case len(e.Protocols) != 0:
|
||||
return false
|
||||
case e.CredentialScope != (credentialScope{}):
|
||||
return false
|
||||
case len(e.SignatureVersions) != 0:
|
||||
return false
|
||||
case len(e.SSLCommonName) != 0:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Defaults applied when an endpoint model does not specify a protocol or
// signature version.
const (
	defaultProtocol = "https"
	defaultSigner   = "v4"
)

// Preference order used when selecting among the protocols and signature
// versions an endpoint advertises (see getByPriority).
var (
	protocolPriority = []string{"https", "http"}
	signerPriority   = []string{"v4", "v2"}
)
|
||||
|
||||
// getByPriority returns the first element of the priority list p that is
// present in s. When s is empty the default def is returned; when no
// priority entry matches, s's first element is returned.
func getByPriority(s []string, p []string, def string) string {
	if len(s) == 0 {
		return def
	}

	for _, want := range p {
		for _, have := range s {
			if have == want {
				return have
			}
		}
	}

	return s[0]
}
|
||||
|
||||
// resolve builds the final ResolvedEndpoint for e. The defaults in defs are
// merged first (later entries overriding earlier ones), then e itself, so
// e's own fields win. The hostname template variables "{service}",
// "{region}", and dnsSuffixTemplateVariable are substituted, and the scheme
// is chosen from the endpoint's protocols (or forced to http by
// opts.DisableSSL). Returns an error when region fails validation.
func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
	// Overlay defaults in order, then this endpoint on top.
	var merged endpoint
	for _, def := range defs {
		merged.mergeIn(def)
	}
	merged.mergeIn(e)
	e = merged

	// Credential scope overrides fall back to the requested region/service.
	signingRegion := e.CredentialScope.Region
	if len(signingRegion) == 0 {
		signingRegion = region
	}

	signingName := e.CredentialScope.Service
	var signingNameDerived bool
	if len(signingName) == 0 {
		signingName = service
		signingNameDerived = true
	}

	hostname := e.Hostname

	// Reject region identifiers that do not match the allowed pattern
	// before substituting them into the hostname.
	if !validateInputRegion(region) {
		return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
	}

	// An endpoint-level DNS suffix overrides the partition-level one.
	if len(merged.DNSSuffix) > 0 {
		dnsSuffix = merged.DNSSuffix
	}

	// Substitute each template variable at most once.
	u := strings.Replace(hostname, "{service}", service, 1)
	u = strings.Replace(u, "{region}", region, 1)
	u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1)

	scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
	u = fmt.Sprintf("%s://%s", scheme, u)

	// Surface deprecated endpoints only when the caller opted in to the
	// warning and supplied a logger.
	if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil {
		opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u))
	}

	return ResolvedEndpoint{
		URL:                u,
		PartitionID:        partitionID,
		SigningRegion:      signingRegion,
		SigningName:        signingName,
		SigningNameDerived: signingNameDerived,
		SigningMethod:      getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
	}, nil
}
|
||||
|
||||
// getEndpointScheme picks the URL scheme for an endpoint. disableSSL forces
// "http"; otherwise the highest-priority protocol the endpoint advertises
// is used, defaulting to "https" when none are listed.
func getEndpointScheme(protocols []string, disableSSL bool) string {
	if disableSSL {
		return "http"
	}

	return getByPriority(protocols, protocolPriority, defaultProtocol)
}
|
||||
|
||||
// mergeIn overlays other onto e, field by field. Only fields that are set
// in other (non-empty, or not boxedBoolUnset for Deprecated) overwrite e's
// values; unset fields in other leave e untouched.
func (e *endpoint) mergeIn(other endpoint) {
	if len(other.Hostname) > 0 {
		e.Hostname = other.Hostname
	}
	if len(other.Protocols) > 0 {
		e.Protocols = other.Protocols
	}
	if len(other.SignatureVersions) > 0 {
		e.SignatureVersions = other.SignatureVersions
	}
	if len(other.CredentialScope.Region) > 0 {
		e.CredentialScope.Region = other.CredentialScope.Region
	}
	if len(other.CredentialScope.Service) > 0 {
		e.CredentialScope.Service = other.CredentialScope.Service
	}
	if len(other.SSLCommonName) > 0 {
		e.SSLCommonName = other.SSLCommonName
	}
	if len(other.DNSSuffix) > 0 {
		e.DNSSuffix = other.DNSSuffix
	}
	if other.Deprecated != boxedBoolUnset {
		e.Deprecated = other.Deprecated
	}
}
|
||||
|
||||
// credentialScope overrides the signing region and/or service name used in
// a request's credential scope.
type credentialScope struct {
	Region  string `json:"region"`
	Service string `json:"service"`
}

// boxedBool is a tri-state boolean (unset/false/true) so that JSON can
// distinguish an absent value from an explicit false.
type boxedBool int

// UnmarshalJSON decodes a JSON boolean into boxedTrue or boxedFalse.
func (b *boxedBool) UnmarshalJSON(buf []byte) error {
	v, err := strconv.ParseBool(string(buf))
	if err != nil {
		return err
	}

	if v {
		*b = boxedTrue
	} else {
		*b = boxedFalse
	}

	return nil
}

// boxedBool states; the zero value is "unset".
const (
	boxedBoolUnset boxedBool = iota
	boxedFalse
	boxedTrue
)
|
||||
|
||||
// validateInputRegion reports whether region matches the allowed region
// identifier pattern (regionValidationRegex) — presumably to keep arbitrary
// input out of the {region} hostname substitution; confirm against the
// regex's definition.
func validateInputRegion(region string) bool {
	return regionValidationRegex.MatchString(region)
}
|
@ -0,0 +1,412 @@
|
||||
//go:build codegen
|
||||
// +build codegen
|
||||
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"text/template"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// A CodeGenOptions are the options for code generating the endpoints into
// Go code from the endpoints model definition.
type CodeGenOptions struct {
	// Options for how the model will be decoded.
	DecodeModelOptions DecodeModelOptions

	// Disables code generation of the service endpoint prefix IDs defined in
	// the model.
	DisableGenerateServiceIDs bool
}

// Set combines all of the option functions together, applying each one in
// order to this options value.
func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
	for _, fn := range optFns {
		fn(d)
	}
}
|
||||
|
||||
// CodeGenModel given a endpoints model file will decode it and attempt to
// generate Go code from the model definition. Error will be returned if
// the code is unable to be generated, or decoded.
func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
	var opts CodeGenOptions
	opts.Set(optFns...)

	// Decode the model with the caller's decode options.
	resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
		*d = opts.DecodeModelOptions
	})
	if err != nil {
		return err
	}

	// The template receives both the decoded resolver and the code-gen
	// options so templates can branch on e.g. DisableGenerateServiceIDs.
	v := struct {
		Resolver
		CodeGenOptions
	}{
		Resolver:       resolver,
		CodeGenOptions: opts,
	}

	// v3Tmpl's "defaults" template is the code generation entry point.
	tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
	if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
		return fmt.Errorf("failed to execute template, %v", err)
	}

	return nil
}
|
||||
|
||||
// toSymbol converts a model identifier into an exported Go identifier:
// each word is title-cased and every rune that is not a letter or digit is
// dropped (e.g. "us-east-1" -> "UsEast1").
func toSymbol(v string) string {
	var b strings.Builder
	for _, r := range strings.Title(v) {
		if unicode.IsLetter(r) || unicode.IsNumber(r) {
			b.WriteRune(r)
		}
	}
	return b.String()
}
|
||||
|
||||
// quoteString returns v as a double-quoted, escaped Go string literal.
func quoteString(v string) string {
	return fmt.Sprintf("%q", v)
}
|
||||
|
||||
// regionConstName builds the exported Go constant name for a region within
// a partition (e.g. "aws", "us-east-1" -> "AwsUsEast1").
func regionConstName(p, r string) string {
	return toSymbol(p) + toSymbol(r)
}

// partitionGetter returns the exported accessor function name for a
// partition (e.g. "aws" -> "AwsPartition").
func partitionGetter(id string) string {
	return fmt.Sprintf("%sPartition", toSymbol(id))
}

// partitionVarName returns the unexported package variable name holding a
// partition (e.g. "aws" -> "awsPartition").
func partitionVarName(id string) string {
	return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
}
|
||||
|
||||
func listPartitionNames(ps partitions) string {
|
||||
names := []string{}
|
||||
switch len(ps) {
|
||||
case 1:
|
||||
return ps[0].Name
|
||||
case 2:
|
||||
return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
|
||||
default:
|
||||
for i, p := range ps {
|
||||
if i == len(ps)-1 {
|
||||
names = append(names, "and "+p.Name)
|
||||
} else {
|
||||
names = append(names, p.Name)
|
||||
}
|
||||
}
|
||||
return strings.Join(names, ", ")
|
||||
}
|
||||
}
|
||||
|
||||
// boxedBoolIfSet formats the boxedBool constant's source name into msg, or
// returns the empty string when v is unset so the template emits nothing.
func boxedBoolIfSet(msg string, v boxedBool) string {
	switch v {
	case boxedTrue:
		return fmt.Sprintf(msg, "boxedTrue")
	case boxedFalse:
		return fmt.Sprintf(msg, "boxedFalse")
	default:
		// boxedBoolUnset (or any unknown value) emits nothing.
		return ""
	}
}
|
||||
|
||||
// stringIfSet formats v into msg, or returns the empty string when v is
// empty so the template emits nothing for unset fields.
func stringIfSet(msg, v string) string {
	if v == "" {
		return ""
	}
	return fmt.Sprintf(msg, v)
}
|
||||
|
||||
// stringSliceIfSet renders vs as a comma separated list of double-quoted
// strings substituted into msg, or returns the empty string when vs is
// empty so the template emits nothing.
func stringSliceIfSet(msg string, vs []string) string {
	if len(vs) == 0 {
		return ""
	}

	quoted := make([]string, 0, len(vs))
	for _, v := range vs {
		quoted = append(quoted, `"`+v+`"`)
	}
	return fmt.Sprintf(msg, strings.Join(quoted, ","))
}
|
||||
|
||||
// endpointIsSet reports whether v differs from the zero endpoint.
// reflect.DeepEqual is required because endpoint contains slice fields and
// is therefore not comparable with ==.
func endpointIsSet(v endpoint) bool {
	return !reflect.DeepEqual(v, endpoint{})
}
|
||||
|
||||
func serviceSet(ps partitions) map[string]struct{} {
|
||||
set := map[string]struct{}{}
|
||||
for _, p := range ps {
|
||||
for id := range p.Services {
|
||||
set[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return set
|
||||
}
|
||||
|
||||
// endpointVariantSetter renders variant as Go source: "0" for the zero
// variant, otherwise an OR of the known variant flag names. Returns an
// error when variant contains bits outside fipsVariant|dualStackVariant.
func endpointVariantSetter(variant endpointVariant) (string, error) {
	if variant == 0 {
		return "0", nil
	}

	// Reject any bits beyond the known flags.
	if variant > (fipsVariant | dualStackVariant) {
		return "", fmt.Errorf("unknown endpoint variant")
	}

	var symbols []string
	if variant&fipsVariant != 0 {
		symbols = append(symbols, "fipsVariant")
	}
	if variant&dualStackVariant != 0 {
		symbols = append(symbols, "dualStackVariant")
	}
	v := strings.Join(symbols, "|")

	return v, nil
}
|
||||
|
||||
// endpointKeySetter renders e as a Go composite literal for use in the
// generated source, emitting the Variant field only when non-zero.
func endpointKeySetter(e endpointKey) (string, error) {
	var sb strings.Builder
	sb.WriteString("endpointKey{\n")
	sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region))
	if e.Variant != 0 {
		variantSetter, err := endpointVariantSetter(e.Variant)
		if err != nil {
			return "", err
		}
		sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
	}
	sb.WriteString("}")
	return sb.String(), nil
}

// defaultKeySetter renders e as a Go composite literal for use in the
// generated source, emitting the Variant field only when non-zero.
func defaultKeySetter(e defaultKey) (string, error) {
	var sb strings.Builder
	sb.WriteString("defaultKey{\n")
	if e.Variant != 0 {
		variantSetter, err := endpointVariantSetter(e.Variant)
		if err != nil {
			return "", err
		}
		sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
	}
	sb.WriteString("}")
	return sb.String(), nil
}
|
||||
|
||||
// funcMap exposes the helper functions above to the v3Tmpl code generation
// templates.
var funcMap = template.FuncMap{
	"ToSymbol":              toSymbol,
	"QuoteString":           quoteString,
	"RegionConst":           regionConstName,
	"PartitionGetter":       partitionGetter,
	"PartitionVarName":      partitionVarName,
	"ListPartitionNames":    listPartitionNames,
	"BoxedBoolIfSet":        boxedBoolIfSet,
	"StringIfSet":           stringIfSet,
	"StringSliceIfSet":      stringSliceIfSet,
	"EndpointIsSet":         endpointIsSet,
	"ServicesSet":           serviceSet,
	"EndpointVariantSetter": endpointVariantSetter,
	"EndpointKeySetter":     endpointKeySetter,
	"DefaultKeySetter":      defaultKeySetter,
}
|
||||
|
||||
const v3Tmpl = `
|
||||
{{ define "defaults" -}}
|
||||
// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
|
||||
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
{{ template "partition consts" $.Resolver }}
|
||||
|
||||
{{ range $_, $partition := $.Resolver }}
|
||||
{{ template "partition region consts" $partition }}
|
||||
{{ end }}
|
||||
|
||||
{{ if not $.DisableGenerateServiceIDs -}}
|
||||
{{ template "service consts" $.Resolver }}
|
||||
{{- end }}
|
||||
|
||||
{{ template "endpoint resolvers" $.Resolver }}
|
||||
{{- end }}
|
||||
|
||||
{{ define "partition consts" }}
|
||||
// Partition identifiers
|
||||
const (
|
||||
{{ range $_, $p := . -}}
|
||||
{{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
|
||||
{{ end -}}
|
||||
)
|
||||
{{- end }}
|
||||
|
||||
{{ define "partition region consts" }}
|
||||
// {{ .Name }} partition's regions.
|
||||
const (
|
||||
{{ range $id, $region := .Regions -}}
|
||||
{{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
|
||||
{{ end -}}
|
||||
)
|
||||
{{- end }}
|
||||
|
||||
{{ define "service consts" }}
|
||||
// Service identifiers
|
||||
const (
|
||||
{{ $serviceSet := ServicesSet . -}}
|
||||
{{ range $id, $_ := $serviceSet -}}
|
||||
{{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
|
||||
{{ end -}}
|
||||
)
|
||||
{{- end }}
|
||||
|
||||
{{ define "endpoint resolvers" }}
|
||||
// DefaultResolver returns an Endpoint resolver that will be able
|
||||
// to resolve endpoints for: {{ ListPartitionNames . }}.
|
||||
//
|
||||
// Use DefaultPartitions() to get the list of the default partitions.
|
||||
func DefaultResolver() Resolver {
|
||||
return defaultPartitions
|
||||
}
|
||||
|
||||
// DefaultPartitions returns a list of the partitions the SDK is bundled
|
||||
// with. The available partitions are: {{ ListPartitionNames . }}.
|
||||
//
|
||||
// partitions := endpoints.DefaultPartitions
|
||||
// for _, p := range partitions {
|
||||
// // ... inspect partitions
|
||||
// }
|
||||
func DefaultPartitions() []Partition {
|
||||
return defaultPartitions.Partitions()
|
||||
}
|
||||
|
||||
var defaultPartitions = partitions{
|
||||
{{ range $_, $partition := . -}}
|
||||
{{ PartitionVarName $partition.ID }},
|
||||
{{ end }}
|
||||
}
|
||||
|
||||
{{ range $_, $partition := . -}}
|
||||
{{ $name := PartitionGetter $partition.ID -}}
|
||||
// {{ $name }} returns the Resolver for {{ $partition.Name }}.
|
||||
func {{ $name }}() Partition {
|
||||
return {{ PartitionVarName $partition.ID }}.Partition()
|
||||
}
|
||||
var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
{{ define "default partitions" }}
|
||||
func DefaultPartitions() []Partition {
|
||||
return []partition{
|
||||
{{ range $_, $partition := . -}}
|
||||
// {{ ToSymbol $partition.ID}}Partition(),
|
||||
{{ end }}
|
||||
}
|
||||
}
|
||||
{{ end }}
|
||||
|
||||
{{ define "gocode Partition" -}}
|
||||
partition{
|
||||
{{ StringIfSet "ID: %q,\n" .ID -}}
|
||||
{{ StringIfSet "Name: %q,\n" .Name -}}
|
||||
{{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
|
||||
RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
|
||||
{{ if (gt (len .Defaults) 0) -}}
|
||||
Defaults: {{ template "gocode Defaults" .Defaults -}},
|
||||
{{ end -}}
|
||||
Regions: {{ template "gocode Regions" .Regions }},
|
||||
Services: {{ template "gocode Services" .Services }},
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode RegionRegex" -}}
|
||||
regionRegex{
|
||||
Regexp: func() *regexp.Regexp{
|
||||
reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
|
||||
return reg
|
||||
}(),
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Regions" -}}
|
||||
regions{
|
||||
{{ range $id, $region := . -}}
|
||||
"{{ $id }}": {{ template "gocode Region" $region }},
|
||||
{{ end -}}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Region" -}}
|
||||
region{
|
||||
{{ StringIfSet "Description: %q,\n" .Description -}}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Services" -}}
|
||||
services{
|
||||
{{ range $id, $service := . -}}
|
||||
"{{ $id }}": {{ template "gocode Service" $service }},
|
||||
{{ end }}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Service" -}}
|
||||
service{
|
||||
{{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
|
||||
{{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
|
||||
{{ if (gt (len .Defaults) 0) -}}
|
||||
Defaults: {{ template "gocode Defaults" .Defaults -}},
|
||||
{{ end -}}
|
||||
{{ if .Endpoints -}}
|
||||
Endpoints: {{ template "gocode Endpoints" .Endpoints }},
|
||||
{{- end }}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Defaults" -}}
|
||||
endpointDefaults{
|
||||
{{ range $id, $endpoint := . -}}
|
||||
{{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
|
||||
{{ end }}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Endpoints" -}}
|
||||
serviceEndpoints{
|
||||
{{ range $id, $endpoint := . -}}
|
||||
{{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
|
||||
{{ end }}
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{ define "gocode Endpoint" -}}
|
||||
endpoint{
|
||||
{{ StringIfSet "Hostname: %q,\n" .Hostname -}}
|
||||
{{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
|
||||
{{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
|
||||
{{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
|
||||
{{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
|
||||
{{ if or .CredentialScope.Region .CredentialScope.Service -}}
|
||||
CredentialScope: credentialScope{
|
||||
{{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
|
||||
{{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
|
||||
},
|
||||
{{- end }}
|
||||
{{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}}
|
||||
}
|
||||
{{- end }}
|
||||
`
|
@ -0,0 +1,13 @@
|
||||
package aws
|
||||
|
||||
import "github.com/aws/aws-sdk-go/aws/awserr"
|
||||
|
||||
// Sentinel configuration errors returned by endpoint resolution. Compare
// with awserr.Error's Code() ("MissingRegion" / "MissingEndpoint").
var (
	// ErrMissingRegion is an error that is returned if region configuration is
	// not found.
	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
	// resolved for a service.
	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
|
@ -0,0 +1,12 @@
|
||||
package aws
|
||||
|
||||
// JSONValue is a representation of a grab bag type that will be marshaled
// into a json string. This type can be used just like any other map.
//
// Example:
//
//	values := aws.JSONValue{
//	    "Foo": "Bar",
//	}
//	values["Baz"] = "Qux"
type JSONValue map[string]interface{}
|
@ -0,0 +1,121 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged. Levels are bit flags: the base
// level occupies the high bits and sub levels OR in additional low bits.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to workaround
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
	return &l
}
|
||||
|
||||
// Value returns the LogLevel value or the default value LogOff if the LogLevel
|
||||
// is nil. Safe to use on nil value LogLevelTypes.
|
||||
func (l *LogLevelType) Value() LogLevelType {
|
||||
if l != nil {
|
||||
return *l
|
||||
}
|
||||
return LogOff
|
||||
}
|
||||
|
||||
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
|
||||
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
|
||||
// LogLevel is nil, will default to LogOff comparison.
|
||||
func (l *LogLevelType) Matches(v LogLevelType) bool {
|
||||
c := l.Value()
|
||||
return c&v == v
|
||||
}
|
||||
|
||||
// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
|
||||
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
|
||||
// to LogOff comparison.
|
||||
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
|
||||
c := l.Value()
|
||||
return c >= v
|
||||
}
|
||||
|
||||
const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be used to disable all logging.
	LogOff LogLevelType = iota * 0x1000

	// LogDebug state that debug output should be logged by the SDK. This should
	// be used to inspect request made and responses received.
	LogDebug
)

// Debug Logging Sub Levels
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodies in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK.
	// Will also enable LogDebug.
	LogDebugWithHTTPBody

	// LogDebugWithRequestRetries states the SDK should log when service requests will
	// be retried. This should be used to log when you want to log when service
	// requests are being retried. Will also enable LogDebug.
	LogDebugWithRequestRetries

	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors

	// LogDebugWithEventStreamBody states the SDK should log EventStream
	// request and response bodies. This should be used to log the EventStream
	// wire unmarshaled message content of requests and responses made while
	// using the SDK. Will also enable LogDebug.
	LogDebugWithEventStreamBody

	// LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
	LogDebugWithDeprecated
)
|
||||
|
||||
// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
//
//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
//	    fmt.Fprintln(os.Stdout, args...)
//	})})
type LoggerFunc func(...interface{})

// Log calls the wrapped function with the arguments provided.
func (f LoggerFunc) Log(args ...interface{}) {
	f(args...)
}
|
||||
|
||||
// NewDefaultLogger returns a Logger which will write log messages to stdout,
// and use same formatting runes as the stdlib log.Logger.
func NewDefaultLogger() Logger {
	return &defaultLogger{
		logger: log.New(os.Stdout, "", log.LstdFlags),
	}
}

// A defaultLogger provides a minimalistic logger satisfying the Logger
// interface, backed by a stdlib *log.Logger.
type defaultLogger struct {
	logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
|
@ -0,0 +1,19 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func isErrConnectionReset(err error) bool {
|
||||
if strings.Contains(err.Error(), "read: connection reset") {
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.Contains(err.Error(), "use of closed network connection") ||
|
||||
strings.Contains(err.Error(), "connection reset") ||
|
||||
strings.Contains(err.Error(), "broken pipe") {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
@ -0,0 +1,346 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A Handlers provides a collection of request handlers for various
// stages of handling requests. Each field is an independent HandlerList run
// at the corresponding phase of a request's lifecycle.
type Handlers struct {
	Validate         HandlerList
	Build            HandlerList
	BuildStream      HandlerList
	Sign             HandlerList
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalStream  HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
	AfterRetry       HandlerList
	CompleteAttempt  HandlerList
	Complete         HandlerList
}
|
||||
|
||||
// Copy returns a deep copy of this handler's lists, so the copy can be
// mutated without affecting the original.
func (h *Handlers) Copy() Handlers {
	return Handlers{
		Validate:         h.Validate.copy(),
		Build:            h.Build.copy(),
		BuildStream:      h.BuildStream.copy(),
		Sign:             h.Sign.copy(),
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalStream:  h.UnmarshalStream.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
		AfterRetry:       h.AfterRetry.copy(),
		CompleteAttempt:  h.CompleteAttempt.copy(),
		Complete:         h.Complete.copy(),
	}
}
|
||||
|
||||
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
	h.Validate.Clear()
	h.Build.Clear()
	h.BuildStream.Clear()
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalStream.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
	h.Retry.Clear()
	h.AfterRetry.Clear()
	h.CompleteAttempt.Clear()
	h.Complete.Clear()
}
|
||||
|
||||
// IsEmpty returns if there are no handlers in any of the handlerlists.
|
||||
func (h *Handlers) IsEmpty() bool {
|
||||
if h.Validate.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Build.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.BuildStream.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Send.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Sign.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Unmarshal.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.UnmarshalStream.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.UnmarshalMeta.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.UnmarshalError.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.ValidateResponse.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Retry.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.AfterRetry.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.CompleteAttempt.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Complete.Len() != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
	Index   int          // position of the handler within the list
	Handler NamedHandler // the handler being executed
	Request *Request     // the request the handler is operating on
}

// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
	list []NamedHandler

	// Called after each request handler in the list is called. If set
	// and the func returns true the HandlerList will continue to iterate
	// over the request handlers. If false is returned the HandlerList
	// will stop iterating.
	//
	// Should be used if extra logic to be performed between each handler
	// in the list. This can be used to terminate a list's iteration
	// based on a condition such as error like, HandlerListStopOnError.
	// Or for logging like HandlerListLogItem.
	AfterEachFn func(item HandlerListRunItem) bool
}

// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
	Name string
	Fn   func(*Request)
}
|
||||
|
||||
// copy creates a copy of the handler list.
|
||||
func (l *HandlerList) copy() HandlerList {
|
||||
n := HandlerList{
|
||||
AfterEachFn: l.AfterEachFn,
|
||||
}
|
||||
if len(l.list) == 0 {
|
||||
return n
|
||||
}
|
||||
|
||||
n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
|
||||
return n
|
||||
}
|
||||
|
||||
// Clear clears the handler list, keeping the backing array for reuse.
func (l *HandlerList) Clear() {
	l.list = l.list[0:0]
}

// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
	return len(l.list)
}

// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
	l.PushBackNamed(NamedHandler{"__anonymous", f})
}

// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
	// Lazily pre-size the list so the first few appends do not reallocate.
	if cap(l.list) == 0 {
		l.list = make([]NamedHandler, 0, 5)
	}
	l.list = append(l.list, n)
}

// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
	l.PushFrontNamed(NamedHandler{"__anonymous", f})
}

// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
	if cap(l.list) == len(l.list) {
		// Allocating new list required
		l.list = append([]NamedHandler{n}, l.list...)
	} else {
		// Enough room to prepend into list. Grow by one, shift every
		// element right, then place n at the head.
		l.list = append(l.list, NamedHandler{})
		copy(l.list[1:], l.list)
		l.list[0] = n
	}
}
|
||||
|
||||
// Remove removes a NamedHandler n
|
||||
func (l *HandlerList) Remove(n NamedHandler) {
|
||||
l.RemoveByName(n.Name)
|
||||
}
|
||||
|
||||
// RemoveByName removes a NamedHandler by name.
|
||||
func (l *HandlerList) RemoveByName(name string) {
|
||||
for i := 0; i < len(l.list); i++ {
|
||||
m := l.list[i]
|
||||
if m.Name == name {
|
||||
// Shift array preventing creating new arrays
|
||||
copy(l.list[i:], l.list[i+1:])
|
||||
l.list[len(l.list)-1] = NamedHandler{}
|
||||
l.list = l.list[:len(l.list)-1]
|
||||
|
||||
// decrement list so next check to length is correct
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SwapNamed will swap out any existing handlers with the same name as the
|
||||
// passed in NamedHandler returning true if handlers were swapped. False is
|
||||
// returned otherwise.
|
||||
func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
|
||||
for i := 0; i < len(l.list); i++ {
|
||||
if l.list[i].Name == n.Name {
|
||||
l.list[i].Fn = n.Fn
|
||||
swapped = true
|
||||
}
|
||||
}
|
||||
|
||||
return swapped
|
||||
}
|
||||
|
||||
// Swap will swap out all handlers matching the name passed in. The matched
|
||||
// handlers will be swapped in. True is returned if the handlers were swapped.
|
||||
func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
|
||||
var swapped bool
|
||||
|
||||
for i := 0; i < len(l.list); i++ {
|
||||
if l.list[i].Name == name {
|
||||
l.list[i] = replace
|
||||
swapped = true
|
||||
}
|
||||
}
|
||||
|
||||
return swapped
|
||||
}
|
||||
|
||||
// SetBackNamed will replace the named handler if it exists in the handler list.
|
||||
// If the handler does not exist the handler will be added to the end of the list.
|
||||
func (l *HandlerList) SetBackNamed(n NamedHandler) {
|
||||
if !l.SwapNamed(n) {
|
||||
l.PushBackNamed(n)
|
||||
}
|
||||
}
|
||||
|
||||
// SetFrontNamed will replace the named handler if it exists in the handler list.
|
||||
// If the handler does not exist the handler will be added to the beginning of
|
||||
// the list.
|
||||
func (l *HandlerList) SetFrontNamed(n NamedHandler) {
|
||||
if !l.SwapNamed(n) {
|
||||
l.PushFrontNamed(n)
|
||||
}
|
||||
}
|
||||
|
||||
// Run executes all handlers in the list with a given request object.
|
||||
func (l *HandlerList) Run(r *Request) {
|
||||
for i, h := range l.list {
|
||||
h.Fn(r)
|
||||
item := HandlerListRunItem{
|
||||
Index: i, Handler: h, Request: r,
|
||||
}
|
||||
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HandlerListLogItem logs the request handler and the state of the
|
||||
// request's Error value. Always returns true to continue iterating
|
||||
// request handlers in a HandlerList.
|
||||
func HandlerListLogItem(item HandlerListRunItem) bool {
|
||||
if item.Request.Config.Logger == nil {
|
||||
return true
|
||||
}
|
||||
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
|
||||
item.Index, item.Handler.Name, item.Request.Error)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// HandlerListStopOnError returns false to stop the HandlerList iterating
|
||||
// over request handlers if Request.Error is not nil. True otherwise
|
||||
// to continue iterating.
|
||||
func HandlerListStopOnError(item HandlerListRunItem) bool {
|
||||
return item.Request.Error == nil
|
||||
}
|
||||
|
||||
// WithAppendUserAgent will add a string to the user agent prefixed with a
|
||||
// single white space.
|
||||
func WithAppendUserAgent(s string) Option {
|
||||
return func(r *Request) {
|
||||
r.Handlers.Build.PushBack(func(r2 *Request) {
|
||||
AddToUserAgent(r, s)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
||||
// header. If the extra parameters are provided they will be added as metadata to the
|
||||
// name/version pair resulting in the following format.
|
||||
// "name/version (extra0; extra1; ...)"
|
||||
// The user agent part will be concatenated with this current request's user agent string.
|
||||
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
|
||||
ua := fmt.Sprintf("%s/%s", name, version)
|
||||
if len(extra) > 0 {
|
||||
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
|
||||
}
|
||||
return func(r *Request) {
|
||||
AddToUserAgent(r, ua)
|
||||
}
|
||||
}
|
||||
|
||||
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
|
||||
// The input string will be concatenated with the current request's user agent string.
|
||||
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
|
||||
return func(r *Request) {
|
||||
AddToUserAgent(r, s)
|
||||
}
|
||||
}
|
||||
|
||||
// WithSetRequestHeaders updates the operation request's HTTP header to contain
|
||||
// the header key value pairs provided. If the header key already exists in the
|
||||
// request's HTTP header set, the existing value(s) will be replaced.
|
||||
//
|
||||
// Header keys added will be added as canonical format with title casing
|
||||
// applied via http.Header.Set method.
|
||||
func WithSetRequestHeaders(h map[string]string) Option {
|
||||
return withRequestHeader(h).SetRequestHeaders
|
||||
}
|
||||
|
||||
// withRequestHeader is a set of header key/value pairs applied to a
// request by its SetRequestHeaders method.
type withRequestHeader map[string]string
|
||||
|
||||
func (h withRequestHeader) SetRequestHeaders(r *Request) {
|
||||
for k, v := range h {
|
||||
r.HTTPRequest.Header.Set(k, v)
|
||||
}
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// copyHTTPRequest returns a clone of r with body installed as the new
// request body. The URL and Header are deep-copied so the clone can be
// mutated (e.g. on retry) without affecting the original request.
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
	// Shallow copy of the struct first.
	req := &http.Request{}
	*req = *r

	// Deep copy the URL; the shallow copy above aliases r.URL.
	req.URL = new(url.URL)
	*req.URL = *r.URL

	req.Body = body

	// Deep copy the header map for the same reason.
	req.Header = make(http.Header, len(r.Header))
	for k, vs := range r.Header {
		for _, v := range vs {
			req.Header.Add(k, v)
		}
	}

	return req
}
|
@ -0,0 +1,65 @@
|
||||
package request
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
// offsetReader is a thread-safe io.ReadCloser to prevent racing
// with retrying requests
type offsetReader struct {
	// underlying seekable stream being guarded
	buf io.ReadSeeker
	// serializes Read/Seek/Close across goroutines
	lock sync.Mutex
	// once true, Read reports io.EOF; the underlying buf is not closed
	closed bool
}
|
||||
|
||||
func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
|
||||
reader := &offsetReader{}
|
||||
_, err := buf.Seek(offset, sdkio.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.buf = buf
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Close will close the instance of the offset reader's access to
|
||||
// the underlying io.ReadSeeker.
|
||||
func (o *offsetReader) Close() error {
|
||||
o.lock.Lock()
|
||||
defer o.lock.Unlock()
|
||||
o.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read is a thread-safe read of the underlying io.ReadSeeker
|
||||
func (o *offsetReader) Read(p []byte) (int, error) {
|
||||
o.lock.Lock()
|
||||
defer o.lock.Unlock()
|
||||
|
||||
if o.closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
return o.buf.Read(p)
|
||||
}
|
||||
|
||||
// Seek is a thread-safe seeking operation.
|
||||
func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
|
||||
o.lock.Lock()
|
||||
defer o.lock.Unlock()
|
||||
|
||||
return o.buf.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
|
||||
// and close the old buffer.
|
||||
func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
|
||||
if err := o.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newOffsetReader(o.buf, offset)
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue