master v1.0.72
李光春 1 year ago
parent 013838b100
commit 915774b7e1

@ -13,22 +13,17 @@ require (
github.com/go-playground/validator/v10 v10.11.1
github.com/go-redis/redis/v9 v9.0.0-rc.2
github.com/go-sql-driver/mysql v1.7.0
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.22.11+incompatible
github.com/jasonlvhit/gocron v0.0.1
github.com/lib/pq v1.10.7
github.com/mitchellh/mapstructure v1.5.0
github.com/mvdan/xurls v1.1.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/nilorg/sdk v0.0.0-20221104025912-4b6ccb7004d8
github.com/oschwald/geoip2-golang v1.8.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/qiniu/go-sdk/v7 v7.14.0
github.com/robfig/cron/v3 v3.0.1
github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/sirupsen/logrus v1.9.0
github.com/tencentyun/cos-go-sdk-v5 v0.7.40
github.com/upyun/go-sdk/v3 v3.0.3
github.com/tencentyun/cos-go-sdk-v5 v0.7.41
go.mongodb.org/mongo-driver v1.11.1
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.5.0

@ -108,7 +108,6 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-redis/redis v6.15.5+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v9 v9.0.0-rc.2 h1:IN1eI8AvJJeWHjMW/hlFAv2sAfvTun2DVksDDJ3a6a0=
github.com/go-redis/redis/v9 v9.0.0-rc.2/go.mod h1:cgBknjwcBJa2prbnuHH/4k/Mlj4r0pWNV2HBanHujfY=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -187,8 +186,6 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.22.11+incompatible h1:bSww59mgbqFRGCRvlvfQutsptE3lRjNiU5C0YNT/bWw=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.22.11+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
@ -245,8 +242,6 @@ github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels=
github.com/jasonlvhit/gocron v0.0.1 h1:qTt5qF3b3srDjeOIR4Le1LfeyvoYzJlYpqvG7tJX5YU=
github.com/jasonlvhit/gocron v0.0.1/go.mod h1:k9a3TV8VcU73XZxfVHCHWMWF9SOqgoku0/QlY2yvlA4=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
@ -348,18 +343,14 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nilorg/sdk v0.0.0-20221104025912-4b6ccb7004d8 h1:9hvJ/9GQssABrUYNOW1Q6X9/7uY6+Srj9YYYQZVC0AE=
github.com/nilorg/sdk v0.0.0-20221104025912-4b6ccb7004d8/go.mod h1:X1swpPdqguAZaBDoEPyEWHSsJii0YQ1o+3piMv6W3JU=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
@ -376,8 +367,6 @@ github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd918
github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
@ -476,8 +465,8 @@ github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFd
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
github.com/tencentyun/cos-go-sdk-v5 v0.7.41 h1:iU0Li/Np78H4SBna0ECQoF3mpgi6ImLXU+doGzPFXGc=
github.com/tencentyun/cos-go-sdk-v5 v0.7.41/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
@ -490,8 +479,6 @@ github.com/ugorji/go/codec v1.2.8 h1:sgBJS6COt0b/P40VouWKdseidkDgHxYGm0SAglUHfP0
github.com/ugorji/go/codec v1.2.8/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/upyun/go-sdk/v3 v3.0.3 h1:2wUkNk2fyJReMYHMvJyav050D83rYwSjN7mEPR0Pp8Q=
github.com/upyun/go-sdk/v3 v3.0.3/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
@ -589,7 +576,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=

@ -1,5 +1,5 @@
package go_library
// Version returns the current semantic version of this library.
func Version() string {
	// The stale "1.0.71" return left above this line by the version bump
	// was unreachable dead code; only the current version is returned.
	return "1.0.72"
}

@ -1,40 +0,0 @@
package gocache
import (
"github.com/patrickmn/go-cache"
"time"
)
// GoConfig configures the go-cache wrapper.
type GoConfig struct {
	DefaultExpiration time.Duration // default TTL applied by SetDefault
	DefaultClear      time.Duration // interval between purges of expired entries
}
// Go wraps a patrickmn/go-cache instance.
// https://github.com/patrickmn/go-cache
type Go struct {
	config *GoConfig
	db     *cache.Cache // underlying cache driver
}
// NewGo builds a cache instance backed by patrickmn/go-cache, using the
// configured default expiration and cleanup interval.
func NewGo(config *GoConfig) *Go {
	return &Go{
		config: config,
		db:     cache.New(config.DefaultExpiration, config.DefaultClear),
	}
}
// Set stores value under key with an explicit expiration time.
func (c *Go) Set(key string, value interface{}, expirationTime time.Duration) {
	c.db.Set(key, value, expirationTime)
}
// Get returns the value stored under key and whether it was found.
func (c *Go) Get(key string) (interface{}, bool) {
	return c.db.Get(key)
}
// SetDefault stores value under key using the configured default expiration.
func (c *Go) SetDefault(key string, value interface{}) {
	c.db.Set(key, value, c.config.DefaultExpiration)
}

@ -1,42 +0,0 @@
package gocache
import (
"time"
)
// GoCacheConfig configures a GoCache.
type GoCacheConfig struct {
	expiration time.Duration // TTL for entries stored on a cache miss
}
// GoCache layers a per-cache expiration and a miss loader on top of Go.
// https://github.com/patrickmn/go-cache
type GoCache struct {
	config          *GoCacheConfig
	db              *Go              // underlying driver
	GetterInterface GttInterfaceFunc // loader invoked when a key is missing
}
// NewCache wraps this Go instance in a GoCache carrying its own expiration.
func (c *Go) NewCache(config *GoCacheConfig) *GoCache {
	return &GoCache{
		config: config,
		db:     c,
	}
}
// GetInterface returns the cached value for key. On a miss it computes the
// value via GetterInterface, stores it with the configured expiration, and
// returns the freshly stored value.
//
// NOTE(review): the check-then-set below is not atomic, so concurrent
// callers may each invoke GetterInterface once for the same key — confirm
// this is acceptable for the intended workloads.
func (gc *GoCache) GetInterface(key string) (ret interface{}) {
	ret, found := gc.db.Get(key)
	if !found {
		// Miss: populate the cache from the user-supplied loader.
		// (The needless closure around GetterInterface was removed.)
		gc.db.Set(key, gc.GetterInterface(), gc.config.expiration)
		ret, _ = gc.db.Get(key)
	}
	return
}

@ -1,5 +1,14 @@
package godecimal
import "fmt"
// NewInterface creates a Decimal from an arbitrary value by formatting it
// with fmt.Sprint and parsing the resulting string.
//
// NOTE(review): any failure indication from SetString is discarded, so an
// unparseable value silently yields the zero decimal — confirm intended.
func NewInterface(value interface{}) Decimal {
	d := New()
	d.floatValue.SetString(fmt.Sprint(value))
	return d
}
// NewString 从字符串创建
func NewString(s string) Decimal {
d := New()

@ -1,12 +0,0 @@
package goorder
import (
"fmt"
"github.com/dtapps/go-library/utils/gorandom"
"github.com/dtapps/go-library/utils/gotime"
)
// GenerateOrDate builds an order number by concatenating the custom id, a
// minute-precision timestamp ("200601021504") and three random digits.
func GenerateOrDate[T string | int | int8 | int16 | int32 | int64](customId T) string {
	timestamp := gotime.Current().SetFormat("200601021504")
	suffix := gorandom.Numeric(3)
	return fmt.Sprintf("%v%s%s", customId, timestamp, suffix)
}

@ -2,7 +2,7 @@ package goparams
import (
"encoding/json"
"github.com/nilorg/sdk/convert"
"github.com/dtapps/go-library/utils/godecimal"
"log"
"net/url"
)
@ -47,7 +47,7 @@ func GetParamsString(src interface{}) string {
case int, int8, int32, int64:
case uint8, uint16, uint32, uint64:
case float32, float64:
return convert.ToString(src)
return godecimal.NewInterface(src).String()
}
data, err := json.Marshal(src)
if err != nil {

@ -1,54 +0,0 @@
package gostorage
import (
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
"io"
)
// Huaweicloud is a storage client for Huawei Cloud OBS.
type Huaweicloud struct {
	AccessKey  string
	SecretKey  string
	Endpoint   string
	BucketName string
	error      error          // construction error, if any
	client     *obs.ObsClient // underlying OBS driver
}
// NewHuaweicloud creates an OBS storage client bound to bucketName.
// Any construction error is stored in the receiver's error field.
// https://support.huaweicloud.com/sdk-go-devg-obs/obs_33_0001.html
// https://github.com/huaweicloud/huaweicloud-sdk-go-obs
func NewHuaweicloud(accessKey string, secretKey string, endpoint string, bucketName string) *Huaweicloud {
	app := &Huaweicloud{AccessKey: accessKey, SecretKey: secretKey, Endpoint: endpoint, BucketName: bucketName}
	app.client, app.error = obs.New(accessKey, secretKey, endpoint)
	// BUG FIX: the previous code called app.client.Close() immediately on
	// successful construction, releasing the client before PutObject could
	// ever use it. The client must stay open; its lifetime belongs to the
	// caller.
	return app
}
// Bucket switches the client to the named bucket and returns the receiver
// for chaining.
func (c *Huaweicloud) Bucket(name string) *Huaweicloud {
	c.BucketName = name
	return c
}
// PutObject uploads the given stream to filePath[/fileName] in the bucket.
//   - file: content to upload
//   - filePath: target directory path (used alone as the key when fileName is empty)
//   - fileName: target file name
//
// On success resp carries the path, name and resulting object key; on
// failure a zero FileInfo and the error are returned.
func (c *Huaweicloud) PutObject(file io.Reader, filePath, fileName string) (resp FileInfo, err error) {
	objectKey := filePath
	if fileName != "" {
		objectKey = filePath + "/" + fileName
	}
	input := &obs.PutObjectInput{}
	input.Bucket = c.BucketName
	input.Key = objectKey
	input.Body = file
	if _, err = c.client.PutObject(input); err != nil {
		// Do not return a partially populated FileInfo on failure.
		return FileInfo{}, err
	}
	resp.Path = filePath
	resp.Name = fileName
	resp.Url = objectKey
	return
}

@ -1,52 +0,0 @@
package gostorage
import (
"github.com/upyun/go-sdk/v3/upyun"
"io"
)
// Upyun is a storage client for UPYUN object storage.
type Upyun struct {
	Operator   string
	Password   string
	BucketName string
	client     *upyun.UpYun // underlying UPYUN driver
}
// NewUpyun creates a UPYUN storage client bound to bucketName.
// https://help.upyun.com/docs/storage/
// https://github.com/upyun/go-sdk
func NewUpyun(operator string, password string, bucketName string) *Upyun {
	cfg := &upyun.UpYunConfig{
		Bucket:   bucketName,
		Operator: operator,
		Password: password,
	}
	return &Upyun{
		Operator:   operator,
		Password:   password,
		BucketName: bucketName,
		client:     upyun.NewUpYun(cfg),
	}
}
// Bucket switches the client to the named bucket and returns the receiver
// for chaining.
func (c *Upyun) Bucket(name string) *Upyun {
	c.BucketName = name
	return c
}
// PutObject uploads the given stream to filePath[/fileName] in the bucket.
//   - file: content to upload
//   - filePath: target directory path (used alone as the key when fileName is empty)
//   - fileName: target file name
//   - acl: accepted for interface symmetry; not forwarded — the upyun SDK
//     Put call used here has no ACL field. TODO confirm whether it should
//     be applied via metadata.
func (c *Upyun) PutObject(file io.Reader, filePath, fileName, acl string) (resp FileInfo, err error) {
	objectKey := filePath
	if fileName != "" {
		objectKey = filePath + "/" + fileName
	}
	// BUG FIX: the previous code uploaded the hard-coded local file
	// /tmp/upload to /demo.log, ignoring both the caller's stream and the
	// computed object key. Upload the provided reader to objectKey instead.
	err = c.client.Put(&upyun.PutObjectConfig{
		Path:   objectKey,
		Reader: file,
	})
	if err != nil {
		return FileInfo{}, err
	}
	resp.Path = filePath
	resp.Name = fileName
	resp.Url = objectKey
	return
}

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 Huawei Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,320 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"fmt"
"net/url"
"sort"
"strings"
"time"
)
// doAuthTemporary builds a temporary (pre-signed) URL for the request
// described by method/bucketName/objectKey/params/headers, valid for
// expires seconds. For V4 signing the signature travels in X-Amz-* query
// parameters; otherwise an AccessKeyId/Expires/Signature (V2 or OBS
// flavored) query string is appended. Returns the signed URL, or an error
// if the URL or the Date header cannot be parsed.
func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, expires int64) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	// For temporary URLs the STS security token is carried as a query parameter.
	if isAkSkEmpty == false && sh.securityToken != "" {
		if obsClient.conf.signature == SignatureObs {
			params[HEADER_STS_TOKEN_OBS] = sh.securityToken
		} else {
			params[HEADER_STS_TOKEN_AMZ] = sh.securityToken
		}
	}
	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)
	hostName := parsedRequestURL.Host
	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)
	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		if isV4 {
			// V4: derive short/long dates from the canonical Date header,
			// then sign the canonical request and append X-Amz-* params.
			date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			delete(headers, HEADER_DATE_CAMEL)
			shortDate := date.Format(SHORT_DATE_FORMAT)
			longDate := date.Format(LONG_DATE_FORMAT)
			// Default ports (80/443) are stripped from Host before signing.
			if len(headers[HEADER_HOST_CAMEL]) != 0 {
				index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":")
				if index != -1 {
					port := headers[HEADER_HOST_CAMEL][0][index+1:]
					if port == "80" || port == "443" {
						headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]}
					}
				}
			}
			signedHeaders, _headers := getSignedHeaders(headers)
			credential, scope := getCredential(sh.ak, obsClient.conf.region, shortDate)
			params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
			params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
			params[PARAM_DATE_AMZ_CAMEL] = longDate
			params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
			params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")
			// Re-format the URL now that the auth parameters are present.
			requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true)
			parsedRequestURL, _err := url.Parse(requestURL)
			if _err != nil {
				return "", _err
			}
			stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
			signature := getSignature(stringToSign, sh.sk, obsClient.conf.region, shortDate)
			requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))
		} else {
			// V2/OBS: expires becomes an absolute unix timestamp and is
			// temporarily placed in the Date header slot while signing.
			originDate := headers[HEADER_DATE_CAMEL][0]
			date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate)
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			expires += date.Unix()
			headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}
			stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs)
			signature := UrlEncode(Base64Encode(HmacSha1([]byte(sh.sk), []byte(stringToSign))), false)
			if strings.Index(requestURL, "?") < 0 {
				requestURL += "?"
			} else {
				requestURL += "&"
			}
			delete(headers, HEADER_DATE_CAMEL)
			// The AWS-compatible flavor prefixes the key parameter name
			// ("AWSAccessKeyId" instead of "AccessKeyId").
			if obsClient.conf.signature != SignatureObs {
				requestURL += "AWS"
			}
			requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(sh.ak, false), expires, signature)
		}
	}
	return
}
// doAuth computes the Authorization header for a regular (non-presigned)
// request and stores it in headers. A non-empty hostName overrides the host
// parsed from the formatted URL. Returns the formatted request URL, or an
// error if it cannot be parsed.
func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, hostName string) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	// For signed requests the STS security token travels as a header.
	if isAkSkEmpty == false && sh.securityToken != "" {
		if obsClient.conf.signature == SignatureObs {
			headers[HEADER_STS_TOKEN_OBS] = []string{sh.securityToken}
		} else {
			headers[HEADER_STS_TOKEN_AMZ] = []string{sh.securityToken}
		}
	}
	isObs := obsClient.conf.signature == SignatureObs
	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)
	if hostName == "" {
		hostName = parsedRequestURL.Host
	}
	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)
	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		ak := sh.ak
		sk := sh.sk
		var authorization string
		if isV4 {
			// V4 signs an UNSIGNED-PAYLOAD canonical request.
			headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD}
			ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers)
			authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
		} else {
			// V2/OBS emits "<prefix> <ak>:<signature>".
			ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
			hashPrefix := V2_HASH_PREFIX
			if isObs {
				hashPrefix = OBS_HASH_PREFIX
			}
			authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"])
		}
		headers[HEADER_AUTH_CAMEL] = []string{authorization}
	}
	return
}
// prepareHostAndDate forces the Host header to hostName and normalizes the
// date headers: a single-valued x-amz-date is promoted to the canonical
// Date header (parsed from the V4 long-date format for V4 signing, or
// accepted as-is when it already ends in "GMT"); otherwise the x-amz-date
// entry is dropped. If no canonical Date header results, the current UTC
// time is used.
func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) {
	headers[HEADER_HOST_CAMEL] = []string{hostName}
	if date, ok := headers[HEADER_DATE_AMZ]; ok {
		flag := false
		if len(date) == 1 {
			if isV4 {
				if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
					headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
					flag = true
				}
			} else {
				if strings.HasSuffix(date[0], "GMT") {
					headers[HEADER_DATE_CAMEL] = []string{date[0]}
					flag = true
				}
			}
		}
		// flag stays false when the amz date is multi-valued or
		// unparseable; discard it so it cannot corrupt signing.
		if !flag {
			delete(headers, HEADER_DATE_AMZ)
		}
	}
	if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
		headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
	}
}
// encodeHeaders URL-encodes every header value in place.
func encodeHeaders(headers map[string][]string) {
	for name, values := range headers {
		for i := range values {
			values[i] = UrlEncode(values[i], true)
		}
		headers[name] = values
	}
}
// prepareDateHeader blanks the canonical Date line in _headers when a
// vendor-specific date (dataHeader in _headers, or dateCamelHeader in the
// original headers) should take precedence during signing. It applies only
// when _headers carries a Date entry under either the camel-case or the
// lower-case key.
func prepareDateHeader(dataHeader, dateCamelHeader string, headers, _headers map[string][]string) {
	_, hasCamelDate := _headers[HEADER_DATE_CAMEL]
	_, hasLowerDate := _headers[strings.ToLower(HEADER_DATE_CAMEL)]
	if !hasCamelDate && !hasLowerDate {
		return
	}
	if _, ok := _headers[dataHeader]; ok {
		_headers[HEADER_DATE_CAMEL] = []string{""}
		return
	}
	if _, ok := headers[dateCamelHeader]; ok {
		_headers[HEADER_DATE_CAMEL] = []string{""}
	}
}
// getStringToSign renders the canonical header lines for V2/OBS signing:
// plain headers keep their joined value, vendor-prefixed headers are
// rendered as "key:value", and metadata headers additionally have each
// value trimmed before joining.
func getStringToSign(keys []string, isObs bool, _headers map[string][]string) []string {
	// The prefixes do not vary per key, so resolve them once up front.
	prefixHeader, prefixMetaHeader := HEADER_PREFIX, HEADER_PREFIX_META
	if isObs {
		prefixHeader, prefixMetaHeader = HEADER_PREFIX_OBS, HEADER_PREFIX_META_OBS
	}
	stringToSign := make([]string, 0, len(keys))
	for _, key := range keys {
		line := strings.Join(_headers[key], ",")
		if strings.HasPrefix(key, prefixHeader) {
			joined := line
			if strings.HasPrefix(key, prefixMetaHeader) {
				// Metadata values are trimmed individually before joining.
				trimmed := make([]string, 0, len(_headers[key]))
				for _, v := range _headers[key] {
					trimmed = append(trimmed, strings.TrimSpace(v))
				}
				joined = strings.Join(trimmed, ",")
			}
			line = fmt.Sprintf("%s:%s", key, joined)
		}
		stringToSign = append(stringToSign, line)
	}
	return stringToSign
}
// attachHeaders builds the V2 header portion of the string to sign.
// It keeps only the signable headers (content-md5, content-type, date and
// vendor-prefixed headers), pads the always-interested headers with empty
// values, neutralizes the Date header when a vendor date header is present,
// and returns the sorted, newline-joined result.
//
// NOTE: headers is mutated — entries whose trimmed key is empty are deleted.
func attachHeaders(headers map[string][]string, isObs bool) string {
	length := len(headers)
	_headers := make(map[string][]string, length)
	keys := make([]string, 0, length)
	for key, value := range headers {
		_key := strings.ToLower(strings.TrimSpace(key))
		if _key != "" {
			prefixheader := HEADER_PREFIX
			if isObs {
				prefixheader = HEADER_PREFIX_OBS
			}
			// Only these headers participate in the V2 signature.
			if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) {
				keys = append(keys, _key)
				_headers[_key] = value
			}
		} else {
			// Blank header names cannot be signed or sent; drop them.
			delete(headers, key)
		}
	}
	// Headers that must always appear in the canonical form, even if empty.
	for _, interestedHeader := range interestedHeaders {
		if _, ok := _headers[interestedHeader]; !ok {
			_headers[interestedHeader] = []string{""}
			keys = append(keys, interestedHeader)
		}
	}
	dateCamelHeader := PARAM_DATE_AMZ_CAMEL
	dataHeader := HEADER_DATE_AMZ
	if isObs {
		dateCamelHeader = PARAM_DATE_OBS_CAMEL
		dataHeader = HEADER_DATE_OBS
	}
	// Vendor date headers take precedence over the plain Date header.
	prepareDateHeader(dataHeader, dateCamelHeader, headers, _headers)
	sort.Strings(keys)
	stringToSign := getStringToSign(keys, isObs, _headers)
	return strings.Join(stringToSign, "\n")
}
func getScope(region, shortDate string) string {
return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
}
// getCredential returns the V4 credential ("<ak>/<scope>") and the scope itself.
func getCredential(ak, region, shortDate string) (string, string) {
	scope := getScope(region, shortDate)
	credential := ak + "/" + scope
	return credential, scope
}
// getSignedHeaders lower-cases and trims every header name, returning the
// sorted list of signable names plus a lookup map keyed by those names.
// Entries whose trimmed name is empty are removed from headers.
func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
	signedHeaders := make([]string, 0, len(headers))
	_headers := make(map[string][]string, len(headers))
	for key, value := range headers {
		normalized := strings.ToLower(strings.TrimSpace(key))
		if normalized == "" {
			// A blank name cannot be signed or transmitted.
			delete(headers, key)
			continue
		}
		signedHeaders = append(signedHeaders, normalized)
		_headers[normalized] = value
	}
	sort.Strings(signedHeaders)
	return signedHeaders, _headers
}
// getSignature derives the V4 signing key by chained HMAC-SHA256
// (secret -> date -> region -> service -> suffix) and signs stringToSign,
// returning the hex-encoded signature.
func getSignature(stringToSign, sk, region, shortDate string) string {
	signingKey := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
	for _, component := range []string{region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX} {
		signingKey = HmacSha256(signingKey, []byte(component))
	}
	return Hex(HmacSha256(signingKey, []byte(stringToSign)))
}

@ -1,55 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"strings"
)
// getV2StringToSign builds the V2 string to sign from the method, canonical
// headers and canonical URL. Before logging it, any security token found in
// the headers or the query string is masked.
func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
	stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "")

	var isSecurityToken bool
	var securityToken []string
	if isObs {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]
	} else {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
	}
	if !isSecurityToken {
		// Fall back to scanning the query string for a token parameter.
		// The original always sliced with len(HEADER_STS_TOKEN_AMZ)+1 even
		// when the OBS prefix matched; that only worked because both
		// constants happen to have the same length. Slice by the prefix
		// that actually matched instead.
		if params := strings.Split(canonicalizedURL, "?"); len(params) > 1 {
			for _, pair := range strings.Split(params[1], "&") {
				for _, prefix := range []string{HEADER_STS_TOKEN_AMZ + "=", HEADER_STS_TOKEN_OBS + "="} {
					if strings.HasPrefix(pair, prefix) && len(pair) > len(prefix) {
						securityToken = []string{pair[len(prefix):]}
						isSecurityToken = true
					}
				}
			}
		}
	}
	logStringToSign := stringToSign
	if isSecurityToken && len(securityToken) > 0 {
		// Never write credentials to the log.
		logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
	return stringToSign
}
// v2Auth signs the request with HMAC-SHA1 over the V2 string to sign and
// returns the base64-encoded signature.
func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
	stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
	signature := Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))
	return map[string]string{"Signature": signature}
}

@ -1,137 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"strings"
"time"
)
// getV4StringToSign assembles the V4 canonical request
// (method / URL / query / headers / signed-header list / payload hash),
// logs it with any security token masked, and returns the final
// string to sign (algorithm, long date, scope, canonical-request hash).
func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
	canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
	canonicalRequest = append(canonicalRequest, method, "\n", canonicalizedURL, "\n", queryURL, "\n")
	for _, signedHeader := range signedHeaders {
		// Plain map index; the original's "values, _ :=" comma-ok form was
		// redundant (staticcheck S1005).
		for _, value := range headers[signedHeader] {
			canonicalRequest = append(canonicalRequest, signedHeader, ":", value, "\n")
		}
	}
	canonicalRequest = append(canonicalRequest, "\n", strings.Join(signedHeaders, ";"), "\n", payload)
	_canonicalRequest := strings.Join(canonicalRequest, "")

	var isSecurityToken bool
	var securityToken []string
	if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
	}
	if !isSecurityToken {
		// The original sliced with len(HEADER_STS_TOKEN_AMZ)+1 regardless of
		// which prefix matched; slice by the matched prefix instead (both
		// constants are currently the same length, so behavior is unchanged).
		for _, pair := range strings.Split(queryURL, "&") {
			for _, prefix := range []string{HEADER_STS_TOKEN_AMZ + "=", HEADER_STS_TOKEN_OBS + "="} {
				if strings.HasPrefix(pair, prefix) && len(pair) > len(prefix) {
					securityToken = []string{pair[len(prefix):]}
					isSecurityToken = true
				}
			}
		}
	}
	logCanonicalRequest := _canonicalRequest
	if isSecurityToken && len(securityToken) > 0 {
		// Never write credentials to the log.
		logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)

	_stringToSign := strings.Join([]string{
		V4_HASH_PREFIX, "\n",
		longDate, "\n",
		scope, "\n",
		HexSha256([]byte(_canonicalRequest)),
	}, "")
	doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign)
	return _stringToSign
}
// V4Auth is a wrapper for v4Auth. It computes the V4 "Credential",
// "SignedHeaders" and "Signature" values for the given request.
func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
	return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
}
// v4Auth computes the V4 signature pieces ("Credential", "SignedHeaders",
// "Signature") for a request.
func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
	// Request time: taken from the first date header present, in priority
	// order; fall back to "now" when absent or unparseable. The original had
	// four copy-pasted if/else branches; this table preserves their order
	// and formats exactly.
	t := time.Now().UTC()
	for _, candidate := range []struct {
		header string
		format string
	}{
		{HEADER_DATE_AMZ, LONG_DATE_FORMAT},
		{PARAM_DATE_AMZ_CAMEL, LONG_DATE_FORMAT},
		{HEADER_DATE_CAMEL, RFC1123_FORMAT},
		{strings.ToLower(HEADER_DATE_CAMEL), RFC1123_FORMAT},
	} {
		if val, ok := headers[candidate.header]; ok {
			if parsed, err := time.Parse(candidate.format, val[0]); err == nil {
				t = parsed
			}
			break
		}
	}
	shortDate := t.Format(SHORT_DATE_FORMAT)
	longDate := t.Format(LONG_DATE_FORMAT)
	// getSignedHeaders may delete blank header names from headers; keep the
	// payload lookup after it, as the original did.
	signedHeaders, _headers := getSignedHeaders(headers)
	credential, scope := getCredential(ak, region, shortDate)
	payload := UNSIGNED_PAYLOAD
	if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
		payload = val[0]
	}
	stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)
	signature := getSignature(stringToSign, sk, region, shortDate)
	return map[string]string{
		"Credential":    credential,
		"SignedHeaders": strings.Join(signedHeaders, ";"),
		"Signature":     signature,
	}
}

@ -1,68 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"fmt"
"net/http"
"strings"
)
// ObsClient defines OBS client.
type ObsClient struct {
	conf       *config      // resolved client configuration (endpoint, providers, transport)
	httpClient *http.Client // HTTP client shared by all requests of this ObsClient
}
// New creates a new ObsClient instance for the given credentials and
// endpoint. Optional configurers customize the config before defaults are
// applied; an error is returned if configuration or transport setup fails.
func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
	conf := &config{endpoint: endpoint}
	conf.securityProviders = make([]securityProvider, 0, 3)
	conf.securityProviders = append(conf.securityProviders, NewBasicSecurityProvider(ak, sk, ""))
	// -1 marks these as "unset" so initConfigWithDefault can apply defaults.
	conf.maxRetryCount = -1
	conf.maxRedirectCount = -1
	for _, configurer := range configurers {
		configurer(conf)
	}
	if err := conf.initConfigWithDefault(); err != nil {
		return nil, err
	}
	if err := conf.getTransport(); err != nil {
		return nil, err
	}
	if isWarnLogEnabled() {
		info := make([]string, 3)
		info[0] = fmt.Sprintf("[OBS SDK Version=%s", OBS_SDK_VERSION)
		info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
		accessMode := "Virtual Hosting"
		if conf.pathStyle {
			accessMode = "Path"
		}
		info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
		doLog(LEVEL_WARN, strings.Join(info, "];["))
	}
	doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
	// Prefer a caller-supplied http.Client; otherwise build one from the
	// configured transport. (The original duplicated the log+construct tail
	// in both branches.)
	httpClient := conf.httpClient
	if httpClient == nil {
		httpClient = &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}
	}
	return &ObsClient{conf: conf, httpClient: httpClient}, nil
}

@ -1,742 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"fmt"
"strings"
)
// ListBuckets lists buckets.
//
// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
	if input == nil {
		input = &ListBucketsInput{}
	}
	output = &ListBucketsOutput{}
	if err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions); err != nil {
		// Never hand back a partially populated result.
		output = nil
	}
	return
}
// CreateBucket creates a bucket.
//
// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("CreateBucketInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucket deletes a bucket.
//
// You can use this API to delete a bucket. The bucket to be deleted must be empty
// (containing no objects, noncurrent object versions, or part fragments).
func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketStoragePolicy sets bucket storage class.
//
// You can use this API to set storage class for bucket.
func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketStoragePolicyInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// getBucketStoragePolicyS3 fetches the bucket storage class using the
// S3-flavoured wire format and converts it to the common output type.
func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	output = &GetBucketStoragePolicyOutput{}
	// Short declaration replaces the original's split var/assign (staticcheck S1021).
	outputS3 := &getBucketStoragePolicyOutputS3{}
	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions)
	if err != nil {
		output = nil
		return
	}
	output.BaseModel = outputS3.BaseModel
	// Sprintf normalizes the S3 storage-class value into a plain string,
	// matching the original conversion exactly.
	output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass)
	return
}
// getBucketStoragePolicyObs fetches the bucket storage class using the
// OBS-flavoured wire format and converts it to the common output type.
func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	output = &GetBucketStoragePolicyOutput{}
	// Short declaration replaces the original's split var/assign (staticcheck S1021).
	outputObs := &getBucketStoragePolicyOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions)
	if err != nil {
		output = nil
		return
	}
	output.BaseModel = outputObs.BaseModel
	output.StorageClass = outputObs.StorageClass
	return
}
// GetBucketStoragePolicy gets bucket storage class.
//
// You can use this API to obtain the storage class of a bucket. The request
// is dispatched to the S3 or OBS wire format depending on the signature mode.
func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	if obsClient.conf.signature != SignatureObs {
		return obsClient.getBucketStoragePolicyS3(bucketName, extensions)
	}
	return obsClient.getBucketStoragePolicyObs(bucketName, extensions)
}
// SetBucketQuota sets the bucket quota.
//
// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1.
func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketQuotaInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketQuota gets the bucket quota.
//
// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota.
func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) {
	output = &GetBucketQuotaOutput{}
	if err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions); err != nil {
		output = nil
	}
	return
}
// HeadBucket checks whether a bucket exists.
//
// You can use this API to check whether a bucket exists.
func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketMetadata gets the metadata of a bucket.
//
// You can use this API to send a HEAD request to a bucket to obtain the bucket
// metadata such as the storage class and CORS rules (if set).
func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) {
	// Guard against nil input for consistency with the other APIs taking an
	// input struct; previously a nil input would panic on input.Bucket.
	if input == nil {
		return nil, errors.New("GetBucketMetadataInput is nil")
	}
	output = &GetBucketMetadataOutput{}
	err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParseGetBucketMetadataOutput(output)
	}
	return
}
// GetBucketFSStatus sends a HEAD request to obtain the bucket's file-system
// status attributes.
func (obsClient ObsClient) GetBucketFSStatus(input *GetBucketFSStatusInput, extensions ...extensionOptions) (output *GetBucketFSStatusOutput, err error) {
	// Guard against nil input for consistency with the other APIs taking an
	// input struct; previously a nil input would panic on input.Bucket.
	if input == nil {
		return nil, errors.New("GetBucketFSStatusInput is nil")
	}
	output = &GetBucketFSStatusOutput{}
	err = obsClient.doActionWithBucket("GetBucketFSStatus", HTTP_HEAD, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParseGetBucketFSStatusOutput(output)
	}
	return
}
// GetBucketStorageInfo gets storage information about a bucket.
//
// You can use this API to obtain storage information about a bucket, including the
// bucket size and number of objects in the bucket.
func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) {
	output = &GetBucketStorageInfoOutput{}
	if err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions); err != nil {
		output = nil
	}
	return
}
// getBucketLocationS3 fetches the bucket location using the S3-flavoured
// wire format and converts it to the common output type.
func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
	output = &GetBucketLocationOutput{}
	// Short declaration replaces the original's split var/assign (staticcheck S1021).
	outputS3 := &getBucketLocationOutputS3{}
	err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputS3.BaseModel
		output.Location = outputS3.Location
	}
	return
}
// getBucketLocationObs fetches the bucket location using the OBS-flavoured
// wire format and converts it to the common output type.
func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
	output = &GetBucketLocationOutput{}
	// Short declaration replaces the original's split var/assign (staticcheck S1021).
	outputObs := &getBucketLocationOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputObs.BaseModel
		output.Location = outputObs.Location
	}
	return
}
// GetBucketLocation gets the location of a bucket.
//
// You can use this API to obtain the bucket location. The request is
// dispatched to the S3 or OBS wire format depending on the signature mode.
func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) {
	if obsClient.conf.signature != SignatureObs {
		return obsClient.getBucketLocationS3(bucketName, extensions)
	}
	return obsClient.getBucketLocationObs(bucketName, extensions)
}
// SetBucketAcl sets the bucket ACL.
//
// You can use this API to set the ACL for a bucket.
func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketAclInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// getBucketACLObs fetches the bucket ACL using the OBS-flavoured wire
// format and converts each grant to the common Grant type.
func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) {
	output = &GetBucketAclOutput{}
	// Short declaration replaces the original's split var/assign (staticcheck S1021).
	outputObs := &getBucketACLOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputObs.BaseModel
		output.Owner = outputObs.Owner
		output.Grants = make([]Grant, 0, len(outputObs.Grants))
		for _, valGrant := range outputObs.Grants {
			tempOutput := Grant{}
			tempOutput.Delivered = valGrant.Delivered
			tempOutput.Permission = valGrant.Permission
			tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName
			tempOutput.Grantee.ID = valGrant.Grantee.ID
			tempOutput.Grantee.Type = valGrant.Grantee.Type
			// NOTE(review): URI is unconditionally overwritten with
			// GroupAllUsers, discarding valGrant.Grantee.URI — preserved
			// as-is, but confirm this is the intended mapping.
			tempOutput.Grantee.URI = GroupAllUsers
			output.Grants = append(output.Grants, tempOutput)
		}
	}
	return
}
// GetBucketAcl gets the bucket ACL.
//
// You can use this API to obtain a bucket ACL.
func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) {
	// Dispatch to the OBS path first; the original allocated an output that
	// this branch immediately discarded.
	if obsClient.conf.signature == SignatureObs {
		return obsClient.getBucketACLObs(bucketName, extensions)
	}
	output = &GetBucketAclOutput{}
	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions)
	if err != nil {
		output = nil
	}
	return
}
// SetBucketPolicy sets the bucket policy.
//
// You can use this API to set a bucket policy. If the bucket already has a policy, the
// policy will be overwritten by the one specified in this request.
func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketPolicy is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketPolicy gets the bucket policy.
//
// You can use this API to obtain the policy of a bucket.
func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) {
	output = &GetBucketPolicyOutput{}
	// Policies use the V2 action path because the body is raw JSON, not XML.
	if err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucketPolicy deletes the bucket policy.
//
// You can use this API to delete the policy of a bucket.
func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketCors sets CORS rules for a bucket.
//
// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests.
func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketCorsInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketCors gets CORS rules of a bucket.
//
// You can use this API to obtain the CORS rules of a specified bucket.
func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) {
	output = &GetBucketCorsOutput{}
	if err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucketCors deletes CORS rules of a bucket.
//
// You can use this API to delete the CORS rules of a specified bucket.
func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketVersioning sets the versioning status for a bucket.
//
// You can use this API to set the versioning status for a bucket.
func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketVersioningInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketVersioning gets the versioning status of a bucket.
//
// You can use this API to obtain the versioning status of a bucket.
func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) {
	output = &GetBucketVersioningOutput{}
	if err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketWebsiteConfiguration sets website hosting for a bucket.
//
// You can use this API to set website hosting for a bucket.
func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketWebsiteConfigurationInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket.
//
// You can use this API to obtain the website hosting settings of a bucket.
func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
	output = &GetBucketWebsiteConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
//
// You can use this API to delete the website hosting settings of a bucket.
func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketLoggingConfiguration sets the bucket logging.
//
// You can use this API to configure access logging for a bucket.
func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketLoggingConfiguration gets the logging settings of a bucket.
//
// You can use this API to obtain the access logging settings of a bucket.
func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
	output = &GetBucketLoggingConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
//
// You can use this API to set lifecycle rules for a bucket, to periodically transit
// storage classes of objects and delete objects in the bucket.
func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
//
// You can use this API to obtain the lifecycle rules of a bucket.
func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
	output = &GetBucketLifecycleConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
//
// You can use this API to delete all lifecycle rules of a bucket.
func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketEncryption sets the default server-side encryption for a bucket.
//
// You can use this API to create or update the default server-side encryption for a bucket.
func (obsClient ObsClient) SetBucketEncryption(input *SetBucketEncryptionInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketEncryptionInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketEncryption", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketEncryption gets the encryption configuration of a bucket.
//
// You can use this API to obtain the encryption configuration of a bucket.
func (obsClient ObsClient) GetBucketEncryption(bucketName string, extensions ...extensionOptions) (output *GetBucketEncryptionOutput, err error) {
	output = &GetBucketEncryptionOutput{}
	if err = obsClient.doActionWithBucket("GetBucketEncryption", HTTP_GET, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions); err != nil {
		output = nil
	}
	return
}
// DeleteBucketEncryption deletes the encryption configuration of a bucket.
//
// You can use this API to delete the encryption configuration of a bucket.
func (obsClient ObsClient) DeleteBucketEncryption(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketEncryption", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions); err != nil {
		output = nil
	}
	return
}
// SetBucketTagging sets bucket tags.
//
// You can use this API to set bucket tags.
func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketTaggingInput is nil")
	}
	output = &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions); err != nil {
		output = nil
	}
	return
}
// GetBucketTagging gets bucket tags.
//
// You can use this API to obtain the tags of a specified bucket.
func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) {
	result := &GetBucketTaggingOutput{}
	if err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// DeleteBucketTagging deletes bucket tags.
//
// You can use this API to delete the tags of a specified bucket.
func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// SetBucketNotification sets event notification for a bucket.
//
// You can use this API to configure event notification for a bucket. You will be notified of all
// specified operations performed on the bucket.
func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketNotificationInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetBucketNotification gets event notification settings of a bucket.
//
// You can use this API to obtain the event notification configuration of a bucket.
func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) {
	// Non-OBS signatures return the S3-compatible response shape, which
	// needs an extra conversion step.
	if obsClient.conf.signature != SignatureObs {
		return obsClient.getBucketNotificationS3(bucketName, extensions)
	}
	result := &GetBucketNotificationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// getBucketNotificationS3 fetches the notification configuration using the
// S3-compatible response body and converts it to GetBucketNotificationOutput,
// parsing each event string into its EventType.
func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) {
	outputS3 := &getBucketNotificationOutputS3{}
	if err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions); err != nil {
		return nil, err
	}
	result := &GetBucketNotificationOutput{}
	result.BaseModel = outputS3.BaseModel
	configs := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations))
	for _, s3Config := range outputS3.TopicConfigurations {
		events := make([]EventType, 0, len(s3Config.Events))
		for _, event := range s3Config.Events {
			events = append(events, ParseStringToEventType(event))
		}
		configs = append(configs, TopicConfiguration{
			ID:          s3Config.ID,
			Topic:       s3Config.Topic,
			FilterRules: s3Config.FilterRules,
			Events:      events,
		})
	}
	result.TopicConfigurations = configs
	return result, nil
}
// SetBucketRequestPayment sets requester-pays setting for a bucket.
func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketRequestPaymentInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetBucketRequestPayment gets requester-pays setting of a bucket.
func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
	result := &GetBucketRequestPaymentOutput{}
	if err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// SetBucketFetchPolicy sets the bucket fetch policy.
//
// You can use this API to set a bucket fetch policy. The policy status and
// the agency name are both mandatory.
func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	switch {
	case input == nil:
		return nil, errors.New("SetBucketFetchPolicyInput is nil")
	case strings.TrimSpace(string(input.Status)) == "":
		return nil, errors.New("Fetch policy status is empty")
	case strings.TrimSpace(input.Agency) == "":
		return nil, errors.New("Fetch policy agency is empty")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetBucketFetchPolicy gets the bucket fetch policy.
//
// You can use this API to obtain the fetch policy of a bucket.
func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) {
	if input == nil {
		return nil, errors.New("GetBucketFetchPolicyInput is nil")
	}
	result := &GetBucketFetchPolicyOutput{}
	if err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// DeleteBucketFetchPolicy deletes the bucket fetch policy.
//
// You can use this API to delete the fetch policy of a bucket.
func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("DeleteBucketFetchPolicyInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// SetBucketFetchJob sets the bucket fetch job.
//
// You can use this API to set a bucket fetch job. The source URL is mandatory.
func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("SetBucketFetchJobInput is nil")
	case strings.TrimSpace(input.URL) == "":
		return nil, errors.New("URL is empty")
	}
	result := &SetBucketFetchJobOutput{}
	if err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetBucketFetchJob gets the bucket fetch job.
//
// You can use this API to obtain the fetch job of a bucket. The job ID is
// mandatory and becomes part of the request key.
func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("GetBucketFetchJobInput is nil")
	case strings.TrimSpace(input.JobID) == "":
		return nil, errors.New("JobID is empty")
	}
	result := &GetBucketFetchJobOutput{}
	if err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

@ -1,481 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"fmt"
"io"
"os"
"strings"
)
// ListObjects lists objects in a bucket.
//
// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed.
func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListObjectsInput is nil")
	}
	result := &ListObjectsOutput{}
	if err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeListObjectsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// ListVersions lists versioning objects in a bucket.
//
// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed.
func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListVersionsInput is nil")
	}
	result := &ListVersionsOutput{}
	if err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeListVersionsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// HeadObject checks whether an object exists.
//
// You can use this API to check whether an object exists.
func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("HeadObjectInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// SetObjectMetadata sets object metadata.
//
// A nil input is rejected with an error up front; every sibling API performs
// this guard, and without it the input.Bucket dereference below panics.
func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("SetObjectMetadataInput is nil")
	}
	output = &SetObjectMetadataOutput{}
	err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		// Copy the relevant response headers into the typed output fields.
		ParseSetObjectMetadataOutput(output)
	}
	return
}
// DeleteObject deletes an object.
//
// You can use this API to delete an object from a specified bucket.
func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("DeleteObjectInput is nil")
	}
	result := &DeleteObjectOutput{}
	if err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	// Populate version/delete-marker fields from the response headers.
	ParseDeleteObjectOutput(result)
	return result, nil
}
// DeleteObjects deletes objects in a batch.
//
// You can use this API to batch delete objects from a specified bucket.
func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) {
	if input == nil {
		return nil, errors.New("DeleteObjectsInput is nil")
	}
	result := &DeleteObjectsOutput{}
	if err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeDeleteObjectsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// SetObjectAcl sets ACL for an object.
//
// You can use this API to set the ACL for an object in a specified bucket.
func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetObjectAclInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetObjectAcl gets the ACL of an object.
//
// You can use this API to obtain the ACL of an object in a specified bucket.
func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectAclInput is nil")
	}
	result := &GetObjectAclOutput{}
	if err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	// Surface the version id response header, when present, as a field.
	if versionID, ok := result.ResponseHeaders[HEADER_VERSION_ID]; ok {
		result.VersionId = versionID[0]
	}
	return result, nil
}
// RestoreObject restores an object.
func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("RestoreObjectInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// GetObjectMetadata gets object metadata.
//
// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata.
func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectMetadataInput is nil")
	}
	result := &GetObjectMetadataOutput{}
	if err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	// HEAD responses carry all metadata in headers; copy them into fields.
	ParseGetObjectMetadataOutput(result)
	return result, nil
}
// GetAttribute sends a HEAD request for an object and returns its parsed
// attributes.
func (obsClient ObsClient) GetAttribute(input *GetAttributeInput, extensions ...extensionOptions) (output *GetAttributeOutput, err error) {
	if input == nil {
		return nil, errors.New("GetAttributeInput is nil")
	}
	result := &GetAttributeOutput{}
	if err = obsClient.doActionWithBucketAndKey("GetAttribute", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseGetAttributeOutput(result)
	return result, nil
}
// GetObject downloads object.
//
// You can use this API to download an object in a specified bucket.
func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectInput is nil")
	}
	result := &GetObjectOutput{}
	if err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	// Copy the response headers into the typed output fields.
	ParseGetObjectOutput(result)
	return result, nil
}
// PutObject uploads an object to the specified bucket.
//
// When no Content-Type is given it is inferred from the key's extension.
// A body backed by an in-memory *strings.Reader can be replayed, so such
// requests are dispatched through the retryable code path.
func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutObjectInput is nil")
	}
	if input.ContentType == "" && input.Key != "" {
		if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
			input.ContentType = contentType
		}
	}
	output = &PutObjectOutput{}
	var repeatable bool
	if input.Body != nil {
		// Fix: the previous check (`if _, ok := ...; !ok { repeatable = false }`)
		// could never set repeatable to true, so the retryable path was
		// unreachable. Record repeatability directly from the type assertion,
		// before the body is wrapped.
		_, repeatable = input.Body.(*strings.Reader)
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		ParsePutObjectOutput(output)
	}
	return
}
// getContentType resolves a MIME type, preferring the object key's file
// extension and falling back to the source file's extension; it returns the
// empty string when neither extension is known.
func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) {
	keyExt := strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])
	if ct, ok := mimeTypes[keyExt]; ok {
		return ct
	}
	fileExt := strings.ToLower(sourceFile[strings.LastIndex(sourceFile, ".")+1:])
	if ct, ok := mimeTypes[fileExt]; ok {
		return ct
	}
	return
}
// isGetContentType reports whether a Content-Type should be inferred for
// input (no explicit type set and a non-empty key to infer from).
func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool {
	return input.ContentType == "" && input.Key != ""
}
// NewFolder creates a folder object. A folder is represented as a zero-byte
// object whose key ends with a slash; the slash is appended if missing.
func (obsClient ObsClient) NewFolder(input *NewFolderInput, extensions ...extensionOptions) (output *NewFolderOutput, err error) {
	if input == nil {
		return nil, errors.New("NewFolderInput is nil")
	}
	if !strings.HasSuffix(input.Key, "/") {
		input.Key += "/"
	}
	result := &NewFolderOutput{}
	if err = obsClient.doActionWithBucketAndKey("NewFolder", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseNewFolderOutput(result)
	result.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return result, nil
}
// PutFile uploads a file to the specified bucket.
//
// When SourceFile is non-empty the file is opened, wrapped in a
// fileReaderWrapper, and streamed as the object body; an empty SourceFile
// results in a nil body (a zero-byte object). The upload itself is delegated
// to the PutObject request path with an internally built PutObjectInput.
func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutFileInput is nil")
	}
	var body io.Reader
	sourceFile := strings.TrimSpace(input.SourceFile)
	if sourceFile != "" {
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		// The file stays open for the duration of the request; close it on
		// return and only log (not fail) if closing goes wrong.
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()
		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd
		if input.ContentLength > 0 {
			// A requested length larger than the file is clamped to the
			// actual file size.
			if input.ContentLength > stat.Size() {
				input.ContentLength = stat.Size()
			}
			fileReaderWrapper.totalCount = input.ContentLength
		} else {
			// No explicit length: upload the whole file.
			fileReaderWrapper.totalCount = stat.Size()
		}
		body = fileReaderWrapper
	}
	// Build the PutObjectInput that actually drives the request.
	_input := &PutObjectInput{}
	_input.PutObjectBasicInput = input.PutObjectBasicInput
	_input.Body = body
	// Infer Content-Type from the key or file extension when not set.
	if obsClient.isGetContentType(_input) {
		_input.ContentType = obsClient.getContentType(_input, sourceFile)
	}
	output = &PutObjectOutput{}
	err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParsePutObjectOutput(output)
	}
	return
}
// CopyObject creates a copy for an existing object.
//
// You can use this API to create a copy for an object in a specified bucket.
// The copy-source bucket and key are both mandatory.
func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("CopyObjectInput is nil")
	case strings.TrimSpace(input.CopySourceBucket) == "":
		return nil, errors.New("Source bucket is empty")
	case strings.TrimSpace(input.CopySourceKey) == "":
		return nil, errors.New("Source key is empty")
	}
	result := &CopyObjectOutput{}
	if err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseCopyObjectOutput(result)
	return result, nil
}
// AppendObject appends data to an appendable object.
//
// When no Content-Type is given it is inferred from the key's extension.
// A body backed by an in-memory *strings.Reader can be replayed, so such
// requests are dispatched through the retryable code path.
func (obsClient ObsClient) AppendObject(input *AppendObjectInput, extensions ...extensionOptions) (output *AppendObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("AppendObjectInput is nil")
	}
	if input.ContentType == "" && input.Key != "" {
		if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
			input.ContentType = contentType
		}
	}
	output = &AppendObjectOutput{}
	var repeatable bool
	if input.Body != nil {
		// Fix: the previous check (`if _, ok := ...; !ok { repeatable = false }`)
		// could never set repeatable to true, so the retryable path was
		// unreachable. Record repeatability directly from the type assertion,
		// before the body is wrapped.
		_, repeatable = input.Body.(*strings.Reader)
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("AppendObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("AppendObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		// Parsing can itself fail (e.g. a bad next-append position header).
		if err = ParseAppendObjectOutput(output); err != nil {
			output = nil
		}
	}
	return
}
// ModifyObject writes data into an existing object at a given position.
//
// A body backed by an in-memory *strings.Reader can be replayed, so such
// requests are dispatched through the retryable code path.
func (obsClient ObsClient) ModifyObject(input *ModifyObjectInput, extensions ...extensionOptions) (output *ModifyObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("ModifyObjectInput is nil")
	}
	output = &ModifyObjectOutput{}
	var repeatable bool
	if input.Body != nil {
		// Fix: the previous check (`if _, ok := ...; !ok { repeatable = false }`)
		// could never set repeatable to true, so the retryable path was
		// unreachable. Record repeatability directly from the type assertion,
		// before the body is wrapped.
		_, repeatable = input.Body.(*strings.Reader)
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		ParseModifyObjectOutput(output)
	}
	return
}
// RenameFile renames a file object to a new key.
func (obsClient ObsClient) RenameFile(input *RenameFileInput, extensions ...extensionOptions) (output *RenameFileOutput, err error) {
	if input == nil {
		return nil, errors.New("RenameFileInput is nil")
	}
	result := &RenameFileOutput{}
	if err = obsClient.doActionWithBucketAndKey("RenameFile", HTTP_POST, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// RenameFolder renames a folder object. Folder keys must end with a slash,
// so both the current and the new key get one appended if missing.
func (obsClient ObsClient) RenameFolder(input *RenameFolderInput, extensions ...extensionOptions) (output *RenameFolderOutput, err error) {
	if input == nil {
		return nil, errors.New("RenameFolderInput is nil")
	}
	if !strings.HasSuffix(input.Key, "/") {
		input.Key += "/"
	}
	if !strings.HasSuffix(input.NewObjectKey, "/") {
		input.NewObjectKey += "/"
	}
	result := &RenameFolderOutput{}
	if err = obsClient.doActionWithBucketAndKey("RenameFolder", HTTP_POST, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

@ -1,49 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"strings"
)
// Refresh refreshes ak, sk and securityToken for obsClient.
//
// Only the first BasicSecurityProvider found in the provider chain is
// updated; all credentials are trimmed of surrounding whitespace.
func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
	for _, sp := range obsClient.conf.securityProviders {
		bsp, ok := sp.(*BasicSecurityProvider)
		if !ok {
			continue
		}
		bsp.refresh(strings.TrimSpace(ak), strings.TrimSpace(sk), strings.TrimSpace(securityToken))
		break
	}
}
// getSecurity returns the first non-empty credential set (both ak and sk
// present) from the provider chain, or emptySecurityHolder if none yields one.
// Ranging over a nil provider slice is safe, so no explicit nil check is needed.
func (obsClient ObsClient) getSecurity() securityHolder {
	for _, sp := range obsClient.conf.securityProviders {
		if sp == nil {
			continue
		}
		if sh := sp.getSecurity(); sh.ak != "" && sh.sk != "" {
			return sh
		}
	}
	return emptySecurityHolder
}
// Close closes ObsClient.
//
// The HTTP client reference is dropped, idle connections held by the
// transport are shut down, and the configuration is released. The client
// must not be used after Close.
func (obsClient *ObsClient) Close() {
	obsClient.httpClient = nil
	obsClient.conf.transport.CloseIdleConnections()
	obsClient.conf = nil
}

@ -1,252 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"io"
"os"
"sort"
"strings"
)
// ListMultipartUploads lists the multipart uploads.
//
// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket.
func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListMultipartUploadsInput is nil")
	}
	result := &ListMultipartUploadsOutput{}
	if err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeListMultipartUploadsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	switch {
	case input == nil:
		return nil, errors.New("AbortMultipartUploadInput is nil")
	case input.UploadId == "":
		return nil, errors.New("UploadId is empty")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// InitiateMultipartUpload initializes a multipart upload.
//
// When no Content-Type is given it is inferred from the key's extension.
func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) {
	if input == nil {
		return nil, errors.New("InitiateMultipartUploadInput is nil")
	}
	if input.ContentType == "" && input.Key != "" {
		ext := strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])
		if contentType, ok := mimeTypes[ext]; ok {
			input.ContentType = contentType
		}
	}
	result := &InitiateMultipartUploadOutput{}
	if err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseInitiateMultipartUploadOutput(result)
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeInitiateMultipartUploadOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket
// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB,
// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000.
func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) {
	if _input == nil {
		return nil, errors.New("UploadPartInput is nil")
	}
	if _input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	// Work on a shallow copy so the caller's input is never mutated.
	input := &UploadPartInput{}
	input.Bucket = _input.Bucket
	input.Key = _input.Key
	input.PartNumber = _input.PartNumber
	input.UploadId = _input.UploadId
	input.ContentMD5 = _input.ContentMD5
	input.SourceFile = _input.SourceFile
	input.Offset = _input.Offset
	input.PartSize = _input.PartSize
	input.SseHeader = _input.SseHeader
	input.Body = _input.Body
	output = &UploadPartOutput{}
	var repeatable bool
	if input.Body != nil {
		// Fix: the previous check (`if _, ok := ...; !ok { repeatable = false }`)
		// could never set repeatable to true, so an in-memory *strings.Reader
		// body never took the retryable path. Record repeatability directly
		// from the type assertion, before the body is wrapped.
		_, repeatable = input.Body.(*strings.Reader)
		if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize}
		}
	} else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" {
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		// The file stays open for the duration of the request; close it on
		// return and only log (not fail) if closing goes wrong.
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()
		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileSize := stat.Size()
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd
		// Clamp an out-of-range offset to the start of the file, and clamp
		// the part size to the remaining bytes after the offset.
		if input.Offset < 0 || input.Offset > fileSize {
			input.Offset = 0
		}
		if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) {
			input.PartSize = fileSize - input.Offset
		}
		fileReaderWrapper.totalCount = input.PartSize
		if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil {
			return nil, err
		}
		input.Body = fileReaderWrapper
		// A file-backed body can be re-read, so the request is retryable.
		repeatable = true
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		ParseUploadPartOutput(output)
		output.PartNumber = input.PartNumber
	}
	return
}
// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID.
//
// Parts are sorted by part number before the request is sent, as required by
// the service.
func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("CompleteMultipartUploadInput is nil")
	case input.UploadId == "":
		return nil, errors.New("UploadId is empty")
	}
	var parts partSlice = input.Parts
	sort.Sort(parts)
	result := &CompleteMultipartUploadOutput{}
	if err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseCompleteMultipartUploadOutput(result)
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeCompleteMultipartUploadOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// ListParts lists the uploaded parts in a bucket by using the multipart upload ID.
func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("ListPartsInput is nil")
	case input.UploadId == "":
		return nil, errors.New("UploadId is empty")
	}
	result := &ListPartsOutput{}
	if err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if err = decodeListPartsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// CopyPart copy a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
// The upload ID, copy-source bucket and copy-source key are all mandatory.
func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("CopyPartInput is nil")
	case input.UploadId == "":
		return nil, errors.New("UploadId is empty")
	case strings.TrimSpace(input.CopySourceBucket) == "":
		return nil, errors.New("Source bucket is empty")
	case strings.TrimSpace(input.CopySourceKey) == "":
		return nil, errors.New("Source key is empty")
	}
	result := &CopyPartOutput{}
	if err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); err != nil {
		return nil, err
	}
	ParseCopyPartOutput(result)
	result.PartNumber = input.PartNumber
	return result, nil
}

@ -1,59 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
// UploadFile resume uploads.
//
// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
// upload failures caused by poor network conditions and program breakdowns.
// NOTE(review): unlike the plain multipart APIs, input is dereferenced without a
// nil check, so a nil UploadFileInput panics here — confirm callers guarantee non-nil.
func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	// Default the checkpoint file next to the source file when resumption is
	// enabled but no explicit checkpoint path was supplied.
	if input.EnableCheckpoint && input.CheckpointFile == "" {
		input.CheckpointFile = input.UploadFile + ".uploadfile_record"
	}
	// Run at least one concurrent upload task.
	if input.TaskNum <= 0 {
		input.TaskNum = 1
	}
	// Clamp the part size into the range the service accepts.
	if input.PartSize < MIN_PART_SIZE {
		input.PartSize = MIN_PART_SIZE
	} else if input.PartSize > MAX_PART_SIZE {
		input.PartSize = MAX_PART_SIZE
	}
	output, err = obsClient.resumeUpload(input, extensions)
	return
}
// DownloadFile resume downloads.
//
// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
// download failures caused by poor network conditions and program breakdowns.
// NOTE(review): input is dereferenced without a nil check, so a nil
// DownloadFileInput panics here — confirm callers guarantee non-nil.
func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
	// Default the local file name to the object key.
	if input.DownloadFile == "" {
		input.DownloadFile = input.Key
	}
	// Default the checkpoint file next to the target file when resumption is
	// enabled but no explicit checkpoint path was supplied.
	if input.EnableCheckpoint && input.CheckpointFile == "" {
		input.CheckpointFile = input.DownloadFile + ".downloadfile_record"
	}
	// Run at least one concurrent download task.
	if input.TaskNum <= 0 {
		input.TaskNum = 1
	}
	if input.PartSize <= 0 {
		input.PartSize = DEFAULT_PART_SIZE
	}
	output, err = obsClient.resumeDownload(input, extensions)
	return
}

@ -1,505 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
// urlHolder keeps the scheme/host/port triple parsed from the configured
// endpoint (see initConfigWithDefault).
type urlHolder struct {
	scheme string // "http" or "https"
	host   string // host name or IP address, without the port
	port   int    // explicit port, or 443/80 derived from the scheme
}
// config holds all client-side settings for an ObsClient: credential
// providers, parsed endpoint, timeouts, transport options and limits.
type config struct {
	securityProviders []securityProvider // credential (AK/SK/token) providers
	urlHolder         *urlHolder         // parsed endpoint; set by initConfigWithDefault
	pathStyle         bool               // put the bucket in the URL path instead of the host name
	cname             bool               // endpoint is a custom domain; see prepareBaseURL
	sslVerify         bool               // verify the server TLS certificate
	endpoint          string
	signature         SignatureType // v2, v4 or OBS
	region            string
	connectTimeout    int // seconds; TCP connect timeout
	socketTimeout     int // seconds; per-read/write timeout
	headerTimeout     int // seconds; response-header timeout
	idleConnTimeout   int // seconds; idle pooled-connection timeout
	finalTimeout      int // seconds; derived as socketTimeout * 10 in prepareConfig
	maxRetryCount     int
	proxyURL          string
	maxConnsPerHost   int
	pemCerts          []byte // optional PEM root-CA bundle, used only when sslVerify is on
	transport         *http.Transport
	roundTripper      http.RoundTripper
	httpClient        *http.Client
	ctx               context.Context // NOTE(review): request context stored on the config — confirm intended lifetime
	maxRedirectCount  int
	userAgent         string
	enableCompression bool // when false, Transport.DisableCompression is set
}
// String returns a printable summary of the client configuration for logging
// and debugging. Credentials and other sensitive fields are omitted.
// Fix: the original format string ran socketTimeout and headerTimeout together
// ("socketTimeout:%dheaderTimeout:%d"); a ", " separator is now emitted.
func (conf config) String() string {
	return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
		"\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d"+
		"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
		conf.endpoint, conf.signature, conf.pathStyle, conf.region,
		conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
		conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
	)
}
// configurer mutates one config setting; pass configurers to the client
// constructor to override defaults (functional-options pattern).
type configurer func(conf *config)
// WithSecurityProviders is a configurer for ObsClient to append the given
// credential providers; nil entries are skipped.
func WithSecurityProviders(sps ...securityProvider) configurer {
	return func(conf *config) {
		for _, sp := range sps {
			if sp != nil {
				conf.securityProviders = append(conf.securityProviders, sp)
			}
		}
	}
}
// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
// It toggles server-certificate verification without supplying custom CA certs.
func WithSslVerify(sslVerify bool) configurer {
	return WithSslVerifyAndPemCerts(sslVerify, nil)
}
// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
// pemCerts, when non-nil and sslVerify is true, is used as the root-CA pool for
// TLS verification (see getTransport).
func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
	return func(conf *config) {
		conf.sslVerify = sslVerify
		conf.pemCerts = pemCerts
	}
}
// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of
// obtaining the response headers, in seconds.
func WithHeaderTimeout(headerTimeout int) configurer {
	return func(conf *config) {
		conf.headerTimeout = headerTimeout
	}
}
// WithProxyUrl is a configurer for ObsClient to set HTTP proxy.
// The value is parsed with url.Parse when the transport is built.
func WithProxyUrl(proxyURL string) configurer {
	return func(conf *config) {
		conf.proxyURL = proxyURL
	}
}
// WithMaxConnections is a configurer for ObsClient to set the maximum number of
// idle HTTP connections (applied to both MaxIdleConns and MaxIdleConnsPerHost).
func WithMaxConnections(maxConnsPerHost int) configurer {
	return func(conf *config) {
		conf.maxConnsPerHost = maxConnsPerHost
	}
}
// WithPathStyle is a configurer for ObsClient. When enabled, the bucket name is
// placed in the URL path instead of the host name (see prepareBaseURL).
func WithPathStyle(pathStyle bool) configurer {
	return func(conf *config) {
		conf.pathStyle = pathStyle
	}
}
// WithSignature is a configurer for ObsClient to select the signing algorithm
// (SignatureV2, SignatureV4 or SignatureObs).
func WithSignature(signature SignatureType) configurer {
	return func(conf *config) {
		conf.signature = signature
	}
}
// WithRegion is a configurer for ObsClient to set conf.region; when left empty,
// DEFAULT_REGION is used (see initConfigWithDefault).
func WithRegion(region string) configurer {
	return func(conf *config) {
		conf.region = region
	}
}
// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing
// an http/https connection, in seconds. Non-positive values fall back to the
// default in prepareConfig.
func WithConnectTimeout(connectTimeout int) configurer {
	return func(conf *config) {
		conf.connectTimeout = connectTimeout
	}
}
// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at
// the socket layer, in seconds. Non-positive values fall back to the default in
// prepareConfig.
func WithSocketTimeout(socketTimeout int) configurer {
	return func(conf *config) {
		conf.socketTimeout = socketTimeout
	}
}
// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection
// in the connection pool, in seconds. Negative values fall back to the default
// in prepareConfig.
func WithIdleConnTimeout(idleConnTimeout int) configurer {
	return func(conf *config) {
		conf.idleConnTimeout = idleConnTimeout
	}
}
// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an HTTP/HTTPS connection is abnormal.
// Negative values fall back to the default in prepareConfig.
func WithMaxRetryCount(maxRetryCount int) configurer {
	return func(conf *config) {
		conf.maxRetryCount = maxRetryCount
	}
}
// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys.
// Only the first BasicSecurityProvider in the provider list is updated; its
// existing AK/SK are preserved.
func WithSecurityToken(securityToken string) configurer {
	return func(conf *config) {
		for _, sp := range conf.securityProviders {
			if bsp, ok := sp.(*BasicSecurityProvider); ok {
				sh := bsp.getSecurity()
				bsp.refresh(sh.ak, sh.sk, securityToken)
				break
			}
		}
	}
}
// WithHttpTransport is a configurer for ObsClient to set the customized http Transport.
// A custom transport is used as-is: the proxy/TLS/timeout options are only
// applied when the SDK builds its own transport (see getTransport).
func WithHttpTransport(transport *http.Transport) configurer {
	return func(conf *config) {
		conf.transport = transport
	}
}
// WithHttpClient is a configurer for ObsClient to supply a fully custom
// *http.Client instead of the one the SDK would construct.
func WithHttpClient(httpClient *http.Client) configurer {
	return func(conf *config) {
		conf.httpClient = httpClient
	}
}
// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request.
func WithRequestContext(ctx context.Context) configurer {
	return func(conf *config) {
		conf.ctx = ctx
	}
}
// WithCustomDomainName is a configurer for ObsClient. When enabled, the
// endpoint is treated as a custom (CNAME) domain; see prepareBaseURL.
func WithCustomDomainName(cname bool) configurer {
	return func(conf *config) {
		conf.cname = cname
	}
}
// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected.
// Negative values fall back to the default in prepareConfig.
func WithMaxRedirectCount(maxRedirectCount int) configurer {
	return func(conf *config) {
		conf.maxRedirectCount = maxRedirectCount
	}
}
// WithUserAgent is a configurer for ObsClient to set the User-Agent.
func WithUserAgent(userAgent string) configurer {
	return func(conf *config) {
		conf.userAgent = userAgent
	}
}
// WithEnableCompression is a configurer for ObsClient to set the Transport.DisableCompression.
// The transport field is set to the inverse of this flag (see getTransport).
func WithEnableCompression(enableCompression bool) configurer {
	return func(conf *config) {
		conf.enableCompression = enableCompression
	}
}
// prepareConfig replaces unset or out-of-range numeric settings with the
// package defaults and derives finalTimeout from socketTimeout.
func (conf *config) prepareConfig() {
	if conf.connectTimeout <= 0 {
		conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT
	}
	if conf.socketTimeout <= 0 {
		conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT
	}
	// finalTimeout is handed to the connection delegate alongside socketTimeout;
	// presumably an overall cap — see getConnDelegate.
	conf.finalTimeout = conf.socketTimeout * 10
	if conf.headerTimeout <= 0 {
		conf.headerTimeout = DEFAULT_HEADER_TIMEOUT
	}
	if conf.idleConnTimeout < 0 {
		conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT
	}
	if conf.maxRetryCount < 0 {
		conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT
	}
	if conf.maxConnsPerHost <= 0 {
		conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST
	}
	if conf.maxRedirectCount < 0 {
		conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT
	}
	// Path-style addressing is not combined with the OBS signature; fall back to v2.
	if conf.pathStyle && conf.signature == SignatureObs {
		conf.signature = SignatureV2
	}
}
// initConfigWithDefault normalizes the user-supplied configuration and fills in
// defaults: it cleans the endpoint, parses it into scheme/host/port, forces
// path-style addressing for bare-IP endpoints, and defaults signature/region.
// It returns an error when no usable endpoint is configured.
func (conf *config) initConfigWithDefault() error {
	conf.endpoint = strings.TrimSpace(conf.endpoint)
	if conf.endpoint == "" {
		return errors.New("endpoint is not set")
	}
	// Drop any query string accidentally included in the endpoint.
	if index := strings.Index(conf.endpoint, "?"); index > 0 {
		conf.endpoint = conf.endpoint[:index]
	}
	// Strip trailing slashes. TrimRight also handles an endpoint consisting only
	// of slashes, which the previous hand-rolled loop sliced out of bounds on
	// (endpoint "/" trimmed to "" then evaluated endpoint[:-1] — a panic).
	conf.endpoint = strings.TrimRight(conf.endpoint, "/")
	if conf.endpoint == "" {
		return errors.New("endpoint is not set")
	}
	if conf.signature == "" {
		conf.signature = DEFAULT_SIGNATURE
	}
	urlHolder := &urlHolder{}
	var address string
	if strings.HasPrefix(conf.endpoint, "https://") {
		urlHolder.scheme = "https"
		address = conf.endpoint[len("https://"):]
	} else if strings.HasPrefix(conf.endpoint, "http://") {
		urlHolder.scheme = "http"
		address = conf.endpoint[len("http://"):]
	} else {
		// No scheme given: assume HTTPS.
		urlHolder.scheme = "https"
		address = conf.endpoint
	}
	// Split an optional ":port" suffix; a malformed port is silently ignored and
	// falls back to the scheme default below (preserves original behavior).
	addr := strings.Split(address, ":")
	if len(addr) == 2 {
		if port, err := strconv.Atoi(addr[1]); err == nil {
			urlHolder.port = port
		}
	}
	urlHolder.host = addr[0]
	if urlHolder.port == 0 {
		if urlHolder.scheme == "https" {
			urlHolder.port = 443
		} else {
			urlHolder.port = 80
		}
	}
	// Virtual-host-style addressing cannot work for a bare IP.
	if IsIP(urlHolder.host) {
		conf.pathStyle = true
	}
	conf.urlHolder = urlHolder
	conf.region = strings.TrimSpace(conf.region)
	if conf.region == "" {
		conf.region = DEFAULT_REGION
	}
	conf.prepareConfig()
	conf.proxyURL = strings.TrimSpace(conf.proxyURL)
	return nil
}
// getTransport lazily builds conf.transport when the caller has not supplied
// one: a dialer enforcing the configured timeouts, idle-connection limits, an
// optional proxy, and TLS settings. A user-provided transport is used as-is.
func (conf *config) getTransport() error {
	if conf.transport == nil {
		conf.transport = &http.Transport{
			// NOTE(review): uses the deprecated Transport.Dial (no context
			// propagation); net.DialTimeout bounds connection establishment only.
			Dial: func(network, addr string) (net.Conn, error) {
				conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
				if err != nil {
					return nil, err
				}
				// Wrap the conn so the socket/final timeouts can be applied
				// per operation (see getConnDelegate).
				return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
			},
			MaxIdleConns:          conf.maxConnsPerHost,
			MaxIdleConnsPerHost:   conf.maxConnsPerHost,
			ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
			IdleConnTimeout:       time.Second * time.Duration(conf.idleConnTimeout),
		}
		if conf.proxyURL != "" {
			proxyURL, err := url.Parse(conf.proxyURL)
			if err != nil {
				return err
			}
			conf.transport.Proxy = http.ProxyURL(proxyURL)
		}
		// Certificate verification is skipped iff sslVerify is off; a custom CA
		// bundle is honored only when verification is on.
		tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
		if conf.sslVerify && conf.pemCerts != nil {
			pool := x509.NewCertPool()
			pool.AppendCertsFromPEM(conf.pemCerts)
			tlsConfig.RootCAs = pool
		}
		conf.transport.TLSClientConfig = tlsConfig
		conf.transport.DisableCompression = !conf.enableCompression
	}
	return nil
}
// checkRedirectFunc is installed as http.Client.CheckRedirect so the client
// never follows redirects automatically: the last response is returned as-is
// and redirect handling is left to the SDK.
func checkRedirectFunc(req *http.Request, via []*http.Request) error {
	return http.ErrUseLastResponse
}
// DummyQueryEscape return the input string.
// It serves as the identity escape function where object keys must be used unescaped
// (see prepareEscapeFunc).
func DummyQueryEscape(s string) string {
	return s
}
// prepareBaseURL derives the request URL and the canonicalized resource for
// signing from the endpoint, addressing mode (cname / path-style /
// virtual-host) and signature version.
func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
	h := conf.urlHolder
	switch {
	case conf.cname:
		// Custom domain: the host itself identifies the bucket.
		requestURL = fmt.Sprintf("%s://%s:%d", h.scheme, h.host, h.port)
		if conf.signature == "v4" {
			canonicalizedURL = "/"
		} else {
			canonicalizedURL = "/" + h.host + "/"
		}
	case bucketName == "":
		// Service-level request (no bucket).
		requestURL = fmt.Sprintf("%s://%s:%d", h.scheme, h.host, h.port)
		canonicalizedURL = "/"
	case conf.pathStyle:
		// Bucket carried in the URL path.
		requestURL = fmt.Sprintf("%s://%s:%d/%s", h.scheme, h.host, h.port, bucketName)
		canonicalizedURL = "/" + bucketName
	default:
		// Virtual-host style: bucket as a host-name prefix.
		requestURL = fmt.Sprintf("%s://%s.%s:%d", h.scheme, bucketName, h.host, h.port)
		if conf.signature == "v2" || conf.signature == "OBS" {
			canonicalizedURL = "/" + bucketName + "/"
		} else {
			canonicalizedURL = "/"
		}
	}
	return
}
// prepareObjectKey encodes an object key for use in a request URL.
// When escape is true, every rune is query-escaped except "/" (kept as a path
// separator) and " " (path-escaped to "%20" rather than "+"); otherwise the
// supplied escapeFunc is applied to the whole key.
func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
	if !escape {
		return escapeFunc(objectKey)
	}
	var encoded strings.Builder
	for _, r := range objectKey {
		switch s := string(r); s {
		case "/":
			encoded.WriteString(s)
		case " ":
			encoded.WriteString(url.PathEscape(s))
		default:
			encoded.WriteString(url.QueryEscape(s))
		}
	}
	return encoded.String()
}
// prepareEscapeFunc selects the key-escaping function: url.QueryEscape when
// escaping is requested, otherwise the identity function DummyQueryEscape.
func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
	if escape {
		return url.QueryEscape
	}
	return DummyQueryEscape
}
// formatUrls builds the full request URL (base + encoded object key + sorted,
// escaped query string) and the matching canonicalized URL used for request
// signing. Every query parameter goes into requestURL, but only sub-resource
// parameters recognized by the signature scheme go into canonicalizedURL.
func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {
	requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
	var escapeFunc func(s string) string
	escapeFunc = conf.prepareEscapeFunc(escape)

	// Append the (encoded) object key to both URLs.
	if objectKey != "" {
		var encodeObjectKey string
		encodeObjectKey = conf.prepareObjectKey(escape, objectKey, escapeFunc)
		requestURL += "/" + encodeObjectKey
		if !strings.HasSuffix(canonicalizedURL, "/") {
			canonicalizedURL += "/"
		}
		canonicalizedURL += encodeObjectKey
	}

	// Signing requires a deterministic parameter order: sort the keys.
	keys := make([]string, 0, len(params))
	for key := range params {
		keys = append(keys, strings.TrimSpace(key))
	}
	sort.Strings(keys)
	// i counts parameters actually emitted into canonicalizedURL, which may be
	// fewer than the number appended to requestURL.
	i := 0
	for index, key := range keys {
		if index == 0 {
			requestURL += "?"
		} else {
			requestURL += "&"
		}
		_key := url.QueryEscape(key)
		requestURL += _key
		_value := params[key]
		if conf.signature == "v4" {
			// v4: every parameter is "key=value", even when the value is empty.
			requestURL += "=" + url.QueryEscape(_value)
		} else {
			// v2/OBS: empty values are appended as a bare key, and only
			// recognized sub-resources (or signature-prefixed headers) are
			// reflected in the canonicalized URL. Note the canonicalized value
			// is the raw (unescaped) value.
			if _value != "" {
				requestURL += "=" + url.QueryEscape(_value)
				_value = "=" + _value
			} else {
				_value = ""
			}
			lowerKey := strings.ToLower(key)
			_, ok := allowedResourceParameterNames[lowerKey]
			prefixHeader := HEADER_PREFIX
			isObs := conf.signature == SignatureObs
			if isObs {
				prefixHeader = HEADER_PREFIX_OBS
			}
			ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
			if ok {
				if i == 0 {
					canonicalizedURL += "?"
				} else {
					canonicalizedURL += "&"
				}
				canonicalizedURL += getQueryURL(_key, _value)
				i++
			}
		}
	}
	return
}
// getQueryURL concatenates an escaped query key with its value fragment
// (either empty or already prefixed with "=").
func getQueryURL(key, value string) string {
	return key + value
}

@ -1,983 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
const (
OBS_SDK_VERSION = "3.22.11"
USER_AGENT = "obs-sdk-go/" + OBS_SDK_VERSION
HEADER_PREFIX = "x-amz-"
HEADER_PREFIX_META = "x-amz-meta-"
HEADER_PREFIX_OBS = "x-obs-"
HEADER_PREFIX_META_OBS = "x-obs-meta-"
HEADER_DATE_AMZ = "x-amz-date"
HEADER_DATE_OBS = "x-obs-date"
HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
HEADER_STS_TOKEN_OBS = "x-obs-security-token"
HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId"
PREFIX_META = "meta-"
HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
HEADER_ACL_AMZ = "x-amz-acl"
HEADER_ACL_OBS = "x-obs-acl"
HEADER_ACL = "acl"
HEADER_LOCATION_AMZ = "location"
HEADER_BUCKET_LOCATION_OBS = "bucket-location"
HEADER_COPY_SOURCE = "copy-source"
HEADER_COPY_SOURCE_RANGE = "copy-source-range"
HEADER_RANGE = "Range"
HEADER_STORAGE_CLASS = "x-default-storage-class"
HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class"
HEADER_FS_FILE_INTERFACE_OBS = "x-obs-fs-file-interface"
HEADER_MODE = "mode"
HEADER_VERSION_OBS = "version"
HEADER_REQUEST_PAYER = "x-amz-request-payer"
HEADER_GRANT_READ_OBS = "grant-read"
HEADER_GRANT_WRITE_OBS = "grant-write"
HEADER_GRANT_READ_ACP_OBS = "grant-read-acp"
HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp"
HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control"
HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered"
HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered"
HEADER_REQUEST_ID = "request-id"
HEADER_BUCKET_REGION = "bucket-region"
HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
HEADER_EPID_HEADERS = "epid"
HEADER_VERSION_ID = "version-id"
HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
HEADER_DELETE_MARKER = "delete-marker"
HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
HEADER_METADATA_DIRECTIVE = "metadata-directive"
HEADER_EXPIRATION = "expiration"
HEADER_EXPIRES_OBS = "x-obs-expires"
HEADER_RESTORE = "restore"
HEADER_OBJECT_TYPE = "object-type"
HEADER_NEXT_APPEND_POSITION = "next-append-position"
HEADER_STORAGE_CLASS2 = "storage-class"
HEADER_CONTENT_LENGTH = "content-length"
HEADER_CONTENT_TYPE = "content-type"
HEADER_CONTENT_LANGUAGE = "content-language"
HEADER_EXPIRES = "expires"
HEADER_CACHE_CONTROL = "cache-control"
HEADER_CONTENT_DISPOSITION = "content-disposition"
HEADER_CONTENT_ENCODING = "content-encoding"
HEADER_AZ_REDUNDANCY = "az-redundancy"
HEADER_BUCKET_TYPE = "bucket-type"
headerOefMarker = "oef-marker"
HEADER_ETAG = "etag"
HEADER_LASTMODIFIED = "last-modified"
HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match"
HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match"
HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since"
HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since"
HEADER_IF_MATCH = "If-Match"
HEADER_IF_NONE_MATCH = "If-None-Match"
HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
HEADER_SSEC_KEY = "server-side-encryption-customer-key"
HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"
HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id"
HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm"
HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key"
HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5"
HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"
HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id"
HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect"
headerFSFileInterface = "fs-file-interface"
HEADER_DATE_CAMEL = "Date"
HEADER_HOST_CAMEL = "Host"
HEADER_HOST = "host"
HEADER_AUTH_CAMEL = "Authorization"
HEADER_MD5_CAMEL = "Content-MD5"
HEADER_LOCATION_CAMEL = "Location"
HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
HEADER_CONTENT_TYPE_CAML = "Content-Type"
HEADER_USER_AGENT_CAMEL = "User-Agent"
HEADER_ORIGIN_CAMEL = "Origin"
HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
HEADER_CACHE_CONTROL_CAMEL = "Cache-Control"
HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition"
HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding"
HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language"
HEADER_EXPIRES_CAMEL = "Expires"
PARAM_VERSION_ID = "versionId"
PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
PARAM_RESPONSE_EXPIRES = "response-expires"
PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
PARAM_IMAGE_PROCESS = "x-image-process"
PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
PARAM_DATE_OBS_CAMEL = "X-Obs-Date"
PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"
DEFAULT_SIGNATURE = SignatureV2
DEFAULT_REGION = "region"
DEFAULT_CONNECT_TIMEOUT = 60
DEFAULT_SOCKET_TIMEOUT = 60
DEFAULT_HEADER_TIMEOUT = 60
DEFAULT_IDLE_CONN_TIMEOUT = 30
DEFAULT_MAX_RETRY_COUNT = 3
DEFAULT_MAX_REDIRECT_COUNT = 3
DEFAULT_MAX_CONN_PER_HOST = 1000
EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
LONG_DATE_FORMAT = "20060102T150405Z"
SHORT_DATE_FORMAT = "20060102"
ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"
V4_SERVICE_NAME = "s3"
V4_SERVICE_SUFFIX = "aws4_request"
V2_HASH_PREFIX = "AWS"
OBS_HASH_PREFIX = "OBS"
V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
V4_HASH_PRE = "AWS4"
DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms"
DEFAULT_SSE_C_ENCRYPTION = "AES256"
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
HTTP_HEAD = "HEAD"
HTTP_OPTIONS = "OPTIONS"
REQUEST_PAYER = "request-payer"
TRAFFIC_LIMIT = "traffic-limit"
MULTI_AZ = "3az"
MAX_PART_SIZE = 5 * 1024 * 1024 * 1024
MIN_PART_SIZE = 100 * 1024
DEFAULT_PART_SIZE = 9 * 1024 * 1024
MAX_PART_NUM = 10000
)
// SignatureType defines type of signature algorithm used to authenticate requests.
type SignatureType string

const (
	// SignatureV2 signature type v2
	SignatureV2 SignatureType = "v2"
	// SignatureV4 signature type v4
	SignatureV4 SignatureType = "v4"
	// SignatureObs signature type OBS
	SignatureObs SignatureType = "OBS"
)
var (
interestedHeaders = []string{"content-md5", "content-type", "date"}
allowedRequestHTTPHeaderMetadataNames = map[string]bool{
"content-type": true,
"content-md5": true,
"content-length": true,
"content-language": true,
"expires": true,
"origin": true,
"cache-control": true,
"content-disposition": true,
"content-encoding": true,
"access-control-request-method": true,
"access-control-request-headers": true,
"x-default-storage-class": true,
"location": true,
"date": true,
"etag": true,
"range": true,
"host": true,
"if-modified-since": true,
"if-unmodified-since": true,
"if-match": true,
"if-none-match": true,
"last-modified": true,
"content-range": true,
}
allowedResourceParameterNames = map[string]bool{
"acl": true,
"backtosource": true,
"metadata": true,
"policy": true,
"torrent": true,
"logging": true,
"location": true,
"storageinfo": true,
"quota": true,
"storageclass": true,
"storagepolicy": true,
"requestpayment": true,
"versions": true,
"versioning": true,
"versionid": true,
"uploads": true,
"uploadid": true,
"partnumber": true,
"website": true,
"notification": true,
"lifecycle": true,
"deletebucket": true,
"delete": true,
"cors": true,
"restore": true,
"encryption": true,
"tagging": true,
"append": true,
"modify": true,
"position": true,
"replication": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
"x-image-process": true,
"x-oss-process": true,
"x-image-save-bucket": true,
"x-image-save-object": true,
"ignore-sign-in-query": true,
"name": true,
"rename": true,
}
mimeTypes = map[string]string{
"001": "application/x-001",
"301": "application/x-301",
"323": "text/h323",
"7z": "application/x-7z-compressed",
"906": "application/x-906",
"907": "drawing/907",
"IVF": "video/x-ivf",
"a11": "application/x-a11",
"aac": "audio/x-aac",
"acp": "audio/x-mei-aac",
"ai": "application/postscript",
"aif": "audio/aiff",
"aifc": "audio/aiff",
"aiff": "audio/aiff",
"anv": "application/x-anv",
"apk": "application/vnd.android.package-archive",
"asa": "text/asa",
"asf": "video/x-ms-asf",
"asp": "text/asp",
"asx": "video/x-ms-asf",
"atom": "application/atom+xml",
"au": "audio/basic",
"avi": "video/avi",
"awf": "application/vnd.adobe.workflow",
"biz": "text/xml",
"bmp": "application/x-bmp",
"bot": "application/x-bot",
"bz2": "application/x-bzip2",
"c4t": "application/x-c4t",
"c90": "application/x-c90",
"cal": "application/x-cals",
"cat": "application/vnd.ms-pki.seccat",
"cdf": "application/x-netcdf",
"cdr": "application/x-cdr",
"cel": "application/x-cel",
"cer": "application/x-x509-ca-cert",
"cg4": "application/x-g4",
"cgm": "application/x-cgm",
"cit": "application/x-cit",
"class": "java/*",
"cml": "text/xml",
"cmp": "application/x-cmp",
"cmx": "application/x-cmx",
"cot": "application/x-cot",
"crl": "application/pkix-crl",
"crt": "application/x-x509-ca-cert",
"csi": "application/x-csi",
"css": "text/css",
"csv": "text/csv",
"cu": "application/cu-seeme",
"cut": "application/x-cut",
"dbf": "application/x-dbf",
"dbm": "application/x-dbm",
"dbx": "application/x-dbx",
"dcd": "text/xml",
"dcx": "application/x-dcx",
"deb": "application/x-debian-package",
"der": "application/x-x509-ca-cert",
"dgn": "application/x-dgn",
"dib": "application/x-dib",
"dll": "application/x-msdownload",
"doc": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"dot": "application/msword",
"drw": "application/x-drw",
"dtd": "text/xml",
"dvi": "application/x-dvi",
"dwf": "application/x-dwf",
"dwg": "application/x-dwg",
"dxb": "application/x-dxb",
"dxf": "application/x-dxf",
"edn": "application/vnd.adobe.edn",
"emf": "application/x-emf",
"eml": "message/rfc822",
"ent": "text/xml",
"eot": "application/vnd.ms-fontobject",
"epi": "application/x-epi",
"eps": "application/postscript",
"epub": "application/epub+zip",
"etd": "application/x-ebx",
"etx": "text/x-setext",
"exe": "application/x-msdownload",
"fax": "image/fax",
"fdf": "application/vnd.fdf",
"fif": "application/fractals",
"flac": "audio/flac",
"flv": "video/x-flv",
"fo": "text/xml",
"frm": "application/x-frm",
"g4": "application/x-g4",
"gbr": "application/x-gbr",
"gif": "image/gif",
"gl2": "application/x-gl2",
"gp4": "application/x-gp4",
"gz": "application/gzip",
"hgl": "application/x-hgl",
"hmr": "application/x-hmr",
"hpg": "application/x-hpgl",
"hpl": "application/x-hpl",
"hqx": "application/mac-binhex40",
"hrf": "application/x-hrf",
"hta": "application/hta",
"htc": "text/x-component",
"htm": "text/html",
"html": "text/html",
"htt": "text/webviewhtml",
"htx": "text/html",
"icb": "application/x-icb",
"ico": "application/x-ico",
"ics": "text/calendar",
"iff": "application/x-iff",
"ig4": "application/x-g4",
"igs": "application/x-igs",
"iii": "application/x-iphone",
"img": "application/x-img",
"ini": "text/plain",
"ins": "application/x-internet-signup",
"ipa": "application/vnd.iphone",
"iso": "application/x-iso9660-image",
"isp": "application/x-internet-signup",
"jar": "application/java-archive",
"java": "java/*",
"jfif": "image/jpeg",
"jpe": "image/jpeg",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"js": "application/x-javascript",
"json": "application/json",
"jsp": "text/html",
"la1": "audio/x-liquid-file",
"lar": "application/x-laplayer-reg",
"latex": "application/x-latex",
"lavs": "audio/x-liquid-secure",
"lbm": "application/x-lbm",
"lmsff": "audio/x-la-lms",
"log": "text/plain",
"ls": "application/x-javascript",
"ltr": "application/x-ltr",
"m1v": "video/x-mpeg",
"m2v": "video/x-mpeg",
"m3u": "audio/mpegurl",
"m4a": "audio/mp4",
"m4e": "video/mpeg4",
"m4v": "video/mp4",
"mac": "application/x-mac",
"man": "application/x-troff-man",
"math": "text/xml",
"mdb": "application/msaccess",
"mfp": "application/x-shockwave-flash",
"mht": "message/rfc822",
"mhtml": "message/rfc822",
"mi": "application/x-mi",
"mid": "audio/mid",
"midi": "audio/mid",
"mil": "application/x-mil",
"mml": "text/xml",
"mnd": "audio/x-musicnet-download",
"mns": "audio/x-musicnet-stream",
"mocha": "application/x-javascript",
"mov": "video/quicktime",
"movie": "video/x-sgi-movie",
"mp1": "audio/mp1",
"mp2": "audio/mp2",
"mp2v": "video/mpeg",
"mp3": "audio/mp3",
"mp4": "video/mp4",
"mp4a": "audio/mp4",
"mp4v": "video/mp4",
"mpa": "video/x-mpg",
"mpd": "application/vnd.ms-project",
"mpe": "video/mpeg",
"mpeg": "video/mpeg",
"mpg": "video/mpeg",
"mpg4": "video/mp4",
"mpga": "audio/rn-mpeg",
"mpp": "application/vnd.ms-project",
"mps": "video/x-mpeg",
"mpt": "application/vnd.ms-project",
"mpv": "video/mpg",
"mpv2": "video/mpeg",
"mpw": "application/vnd.ms-project",
"mpx": "application/vnd.ms-project",
"mtx": "text/xml",
"mxp": "application/x-mmxp",
"net": "image/pnetvue",
"nrf": "application/x-nrf",
"nws": "message/rfc822",
"odc": "text/x-ms-odc",
"oga": "audio/ogg",
"ogg": "audio/ogg",
"ogv": "video/ogg",
"ogx": "application/ogg",
"out": "application/x-out",
"p10": "application/pkcs10",
"p12": "application/x-pkcs12",
"p7b": "application/x-pkcs7-certificates",
"p7c": "application/pkcs7-mime",
"p7m": "application/pkcs7-mime",
"p7r": "application/x-pkcs7-certreqresp",
"p7s": "application/pkcs7-signature",
"pbm": "image/x-portable-bitmap",
"pc5": "application/x-pc5",
"pci": "application/x-pci",
"pcl": "application/x-pcl",
"pcx": "application/x-pcx",
"pdf": "application/pdf",
"pdx": "application/vnd.adobe.pdx",
"pfx": "application/x-pkcs12",
"pgl": "application/x-pgl",
"pgm": "image/x-portable-graymap",
"pic": "application/x-pic",
"pko": "application/vnd.ms-pki.pko",
"pl": "application/x-perl",
"plg": "text/html",
"pls": "audio/scpls",
"plt": "application/x-plt",
"png": "image/png",
"pnm": "image/x-portable-anymap",
"pot": "application/vnd.ms-powerpoint",
"ppa": "application/vnd.ms-powerpoint",
"ppm": "application/x-ppm",
"pps": "application/vnd.ms-powerpoint",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"pr": "application/x-pr",
"prf": "application/pics-rules",
"prn": "application/x-prn",
"prt": "application/x-prt",
"ps": "application/postscript",
"ptn": "application/x-ptn",
"pwz": "application/vnd.ms-powerpoint",
"qt": "video/quicktime",
"r3t": "text/vnd.rn-realtext3d",
"ra": "audio/vnd.rn-realaudio",
"ram": "audio/x-pn-realaudio",
"rar": "application/x-rar-compressed",
"ras": "application/x-ras",
"rat": "application/rat-file",
"rdf": "text/xml",
"rec": "application/vnd.rn-recording",
"red": "application/x-red",
"rgb": "application/x-rgb",
"rjs": "application/vnd.rn-realsystem-rjs",
"rjt": "application/vnd.rn-realsystem-rjt",
"rlc": "application/x-rlc",
"rle": "application/x-rle",
"rm": "application/vnd.rn-realmedia",
"rmf": "application/vnd.adobe.rmf",
"rmi": "audio/mid",
"rmj": "application/vnd.rn-realsystem-rmj",
"rmm": "audio/x-pn-realaudio",
"rmp": "application/vnd.rn-rn_music_package",
"rms": "application/vnd.rn-realmedia-secure",
"rmvb": "application/vnd.rn-realmedia-vbr",
"rmx": "application/vnd.rn-realsystem-rmx",
"rnx": "application/vnd.rn-realplayer",
"rp": "image/vnd.rn-realpix",
"rpm": "audio/x-pn-realaudio-plugin",
"rsml": "application/vnd.rn-rsml",
"rss": "application/rss+xml",
"rt": "text/vnd.rn-realtext",
"rtf": "application/x-rtf",
"rv": "video/vnd.rn-realvideo",
"sam": "application/x-sam",
"sat": "application/x-sat",
"sdp": "application/sdp",
"sdw": "application/x-sdw",
"sgm": "text/sgml",
"sgml": "text/sgml",
"sis": "application/vnd.symbian.install",
"sisx": "application/vnd.symbian.install",
"sit": "application/x-stuffit",
"slb": "application/x-slb",
"sld": "application/x-sld",
"slk": "drawing/x-slk",
"smi": "application/smil",
"smil": "application/smil",
"smk": "application/x-smk",
"snd": "audio/basic",
"sol": "text/plain",
"sor": "text/plain",
"spc": "application/x-pkcs7-certificates",
"spl": "application/futuresplash",
"spp": "text/xml",
"ssm": "application/streamingmedia",
"sst": "application/vnd.ms-pki.certstore",
"stl": "application/vnd.ms-pki.stl",
"stm": "text/html",
"sty": "application/x-sty",
"svg": "image/svg+xml",
"swf": "application/x-shockwave-flash",
"tar": "application/x-tar",
"tdf": "application/x-tdf",
"tg4": "application/x-tg4",
"tga": "application/x-tga",
"tif": "image/tiff",
"tiff": "image/tiff",
"tld": "text/xml",
"top": "drawing/x-top",
"torrent": "application/x-bittorrent",
"tsd": "text/xml",
"ttf": "application/x-font-ttf",
"txt": "text/plain",
"uin": "application/x-icq",
"uls": "text/iuls",
"vcf": "text/x-vcard",
"vda": "application/x-vda",
"vdx": "application/vnd.visio",
"vml": "text/xml",
"vpg": "application/x-vpeg005",
"vsd": "application/vnd.visio",
"vss": "application/vnd.visio",
"vst": "application/x-vst",
"vsw": "application/vnd.visio",
"vsx": "application/vnd.visio",
"vtx": "application/vnd.visio",
"vxml": "text/xml",
"wav": "audio/wav",
"wax": "audio/x-ms-wax",
"wb1": "application/x-wb1",
"wb2": "application/x-wb2",
"wb3": "application/x-wb3",
"wbmp": "image/vnd.wap.wbmp",
"webm": "video/webm",
"wiz": "application/msword",
"wk3": "application/x-wk3",
"wk4": "application/x-wk4",
"wkq": "application/x-wkq",
"wks": "application/x-wks",
"wm": "video/x-ms-wm",
"wma": "audio/x-ms-wma",
"wmd": "application/x-ms-wmd",
"wmf": "application/x-wmf",
"wml": "text/vnd.wap.wml",
"wmv": "video/x-ms-wmv",
"wmx": "video/x-ms-wmx",
"wmz": "application/x-ms-wmz",
"woff": "application/x-font-woff",
"wp6": "application/x-wp6",
"wpd": "application/x-wpd",
"wpg": "application/x-wpg",
"wpl": "application/vnd.ms-wpl",
"wq1": "application/x-wq1",
"wr1": "application/x-wr1",
"wri": "application/x-wri",
"wrk": "application/x-wrk",
"ws": "application/x-ws",
"ws2": "application/x-ws",
"wsc": "text/scriptlet",
"wsdl": "text/xml",
"wvx": "video/x-ms-wvx",
"x_b": "application/x-x_b",
"x_t": "application/x-x_t",
"xap": "application/x-silverlight-app",
"xbm": "image/x-xbitmap",
"xdp": "application/vnd.adobe.xdp",
"xdr": "text/xml",
"xfd": "application/vnd.adobe.xfd",
"xfdf": "application/vnd.adobe.xfdf",
"xhtml": "text/html",
"xls": "application/vnd.ms-excel",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlw": "application/x-xlw",
"xml": "text/xml",
"xpl": "audio/scpls",
"xpm": "image/x-xpixmap",
"xq": "text/xml",
"xql": "text/xml",
"xquery": "text/xml",
"xsd": "text/xml",
"xsl": "text/xml",
"xslt": "text/xml",
"xwd": "application/x-xwd",
"yaml": "text/yaml",
"yml": "text/yaml",
"zip": "application/zip",
"dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
"wps": "application/vnd.ms-works",
"wpt": "x-lml/x-gps",
"pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12",
"heic": "image/heic",
"mkv": "video/x-matroska",
"raw": "image/x-panasonic-raw",
}
)
// HttpMethodType defines http method type
type HttpMethodType string

const (
	// HttpMethodGet http method: GET
	HttpMethodGet HttpMethodType = HTTP_GET
	// HttpMethodPut http method: PUT
	HttpMethodPut HttpMethodType = HTTP_PUT
	// HttpMethodPost http method: POST
	HttpMethodPost HttpMethodType = HTTP_POST
	// HttpMethodDelete http method: DELETE
	HttpMethodDelete HttpMethodType = HTTP_DELETE
	// HttpMethodHead http method: HEAD
	HttpMethodHead HttpMethodType = HTTP_HEAD
	// HttpMethodOptions http method: OPTIONS
	HttpMethodOptions HttpMethodType = HTTP_OPTIONS
)
// SubResourceType defines the subResource value appended to the request URL
// (e.g. "?acl"), selecting which facet of the bucket/object is addressed.
type SubResourceType string

const (
	// SubResourceStoragePolicy subResource value: storagePolicy
	SubResourceStoragePolicy SubResourceType = "storagePolicy"
	// SubResourceStorageClass subResource value: storageClass
	SubResourceStorageClass SubResourceType = "storageClass"
	// SubResourceQuota subResource value: quota
	SubResourceQuota SubResourceType = "quota"
	// SubResourceStorageInfo subResource value: storageinfo
	SubResourceStorageInfo SubResourceType = "storageinfo"
	// SubResourceLocation subResource value: location
	SubResourceLocation SubResourceType = "location"
	// SubResourceAcl subResource value: acl
	SubResourceAcl SubResourceType = "acl"
	// SubResourcePolicy subResource value: policy
	SubResourcePolicy SubResourceType = "policy"
	// SubResourceCors subResource value: cors
	SubResourceCors SubResourceType = "cors"
	// SubResourceVersioning subResource value: versioning
	SubResourceVersioning SubResourceType = "versioning"
	// SubResourceWebsite subResource value: website
	SubResourceWebsite SubResourceType = "website"
	// SubResourceLogging subResource value: logging
	SubResourceLogging SubResourceType = "logging"
	// SubResourceLifecycle subResource value: lifecycle
	SubResourceLifecycle SubResourceType = "lifecycle"
	// SubResourceNotification subResource value: notification
	SubResourceNotification SubResourceType = "notification"
	// SubResourceEncryption subResource value: encryption
	SubResourceEncryption SubResourceType = "encryption"
	// SubResourceTagging subResource value: tagging
	SubResourceTagging SubResourceType = "tagging"
	// SubResourceDelete subResource value: delete
	SubResourceDelete SubResourceType = "delete"
	// SubResourceVersions subResource value: versions
	SubResourceVersions SubResourceType = "versions"
	// SubResourceUploads subResource value: uploads
	SubResourceUploads SubResourceType = "uploads"
	// SubResourceRestore subResource value: restore
	SubResourceRestore SubResourceType = "restore"
	// SubResourceMetadata subResource value: metadata
	SubResourceMetadata SubResourceType = "metadata"
	// SubResourceRequestPayment subResource value: requestPayment
	SubResourceRequestPayment SubResourceType = "requestPayment"
	// SubResourceAppend subResource value: append
	SubResourceAppend SubResourceType = "append"
	// SubResourceModify subResource value: modify
	SubResourceModify SubResourceType = "modify"
	// SubResourceRename subResource value: rename
	SubResourceRename SubResourceType = "rename"
)
// objectKeyType defines the objectKey value used for virtual (non-object)
// API endpoints addressed through the object-key position of the URL.
type objectKeyType string

const (
	// objectKeyExtensionPolicy objectKey value: v1/extension_policy
	objectKeyExtensionPolicy objectKeyType = "v1/extension_policy"
	// objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs
	objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs"
)

// AclType defines bucket/object acl type (canned ACL names sent to the service)
type AclType string

const (
	// AclPrivate acl type: private
	AclPrivate AclType = "private"
	// AclPublicRead acl type: public-read
	AclPublicRead AclType = "public-read"
	// AclPublicReadWrite acl type: public-read-write
	AclPublicReadWrite AclType = "public-read-write"
	// AclAuthenticatedRead acl type: authenticated-read
	AclAuthenticatedRead AclType = "authenticated-read"
	// AclBucketOwnerRead acl type: bucket-owner-read
	AclBucketOwnerRead AclType = "bucket-owner-read"
	// AclBucketOwnerFullControl acl type: bucket-owner-full-control
	AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
	// AclLogDeliveryWrite acl type: log-delivery-write
	AclLogDeliveryWrite AclType = "log-delivery-write"
	// AclPublicReadDelivery acl type: public-read-delivered
	AclPublicReadDelivery AclType = "public-read-delivered"
	// AclPublicReadWriteDelivery acl type: public-read-write-delivered
	AclPublicReadWriteDelivery AclType = "public-read-write-delivered"
)
// StorageClassType defines bucket storage class
type StorageClassType string

const (
	// StorageClassStandard storage class: STANDARD
	StorageClassStandard StorageClassType = "STANDARD"
	// StorageClassWarm storage class: WARM
	StorageClassWarm StorageClassType = "WARM"
	// StorageClassCold storage class: COLD
	StorageClassCold StorageClassType = "COLD"
	// storageClassStandardIA: S3-compatible alias accepted internally for WARM
	storageClassStandardIA StorageClassType = "STANDARD_IA"
	// storageClassGlacier: S3-compatible alias accepted internally for COLD
	storageClassGlacier StorageClassType = "GLACIER"
)
// PermissionType defines permission type used in ACL grants
type PermissionType string

const (
	// PermissionRead permission type: READ
	PermissionRead PermissionType = "READ"
	// PermissionWrite permission type: WRITE
	PermissionWrite PermissionType = "WRITE"
	// PermissionReadAcp permission type: READ_ACP
	PermissionReadAcp PermissionType = "READ_ACP"
	// PermissionWriteAcp permission type: WRITE_ACP
	PermissionWriteAcp PermissionType = "WRITE_ACP"
	// PermissionFullControl permission type: FULL_CONTROL
	PermissionFullControl PermissionType = "FULL_CONTROL"
)

// GranteeType defines grantee type in an ACL grant
type GranteeType string

const (
	// GranteeGroup grantee type: Group
	GranteeGroup GranteeType = "Group"
	// GranteeUser grantee type: CanonicalUser
	GranteeUser GranteeType = "CanonicalUser"
)

// GroupUriType defines grantee uri type for group grantees
type GroupUriType string

const (
	// GroupAllUsers grantee uri type: AllUsers
	GroupAllUsers GroupUriType = "AllUsers"
	// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
	GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"
	// GroupLogDelivery grantee uri type: LogDelivery
	GroupLogDelivery GroupUriType = "LogDelivery"
)

// VersioningStatusType defines bucket version status
type VersioningStatusType string

const (
	// VersioningStatusEnabled version status: Enabled
	VersioningStatusEnabled VersioningStatusType = "Enabled"
	// VersioningStatusSuspended version status: Suspended
	VersioningStatusSuspended VersioningStatusType = "Suspended"
)
// ProtocolType defines protocol type
type ProtocolType string

const (
	// ProtocolHttp protocol type: http
	ProtocolHttp ProtocolType = "http"
	// ProtocolHttps protocol type: https
	ProtocolHttps ProtocolType = "https"
)

// RuleStatusType defines lifeCycle rule status
type RuleStatusType string

const (
	// RuleStatusEnabled rule status: Enabled
	RuleStatusEnabled RuleStatusType = "Enabled"
	// RuleStatusDisabled rule status: Disabled
	RuleStatusDisabled RuleStatusType = "Disabled"
)

// RestoreTierType defines restore options
type RestoreTierType string

const (
	// RestoreTierExpedited restore options: Expedited
	RestoreTierExpedited RestoreTierType = "Expedited"
	// RestoreTierStandard restore options: Standard
	RestoreTierStandard RestoreTierType = "Standard"
	// RestoreTierBulk restore options: Bulk
	RestoreTierBulk RestoreTierType = "Bulk"
)

// MetadataDirectiveType defines metadata operation indicator
type MetadataDirectiveType string

const (
	// CopyMetadata metadata operation: COPY
	CopyMetadata MetadataDirectiveType = "COPY"
	// ReplaceNew metadata operation: REPLACE_NEW
	ReplaceNew MetadataDirectiveType = "REPLACE_NEW"
	// ReplaceMetadata metadata operation: REPLACE
	ReplaceMetadata MetadataDirectiveType = "REPLACE"
)

// EventType defines bucket notification type of events
type EventType string

const (
	// ObjectCreatedAll type of events: ObjectCreated:*
	ObjectCreatedAll EventType = "ObjectCreated:*"
	// ObjectCreatedPut type of events: ObjectCreated:Put
	ObjectCreatedPut EventType = "ObjectCreated:Put"
	// ObjectCreatedPost type of events: ObjectCreated:Post
	ObjectCreatedPost EventType = "ObjectCreated:Post"
	// ObjectCreatedCopy type of events: ObjectCreated:Copy
	ObjectCreatedCopy EventType = "ObjectCreated:Copy"
	// ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
	ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"
	// ObjectRemovedAll type of events: ObjectRemoved:*
	ObjectRemovedAll EventType = "ObjectRemoved:*"
	// ObjectRemovedDelete type of events: ObjectRemoved:Delete
	ObjectRemovedDelete EventType = "ObjectRemoved:Delete"
	// ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated
	ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated"
)

// PayerType defines type of payer
type PayerType string

const (
	// BucketOwnerPayer type of payer: BucketOwner
	BucketOwnerPayer PayerType = "BucketOwner"
	// RequesterPayer type of payer: Requester
	RequesterPayer PayerType = "Requester"
	// Requester header for requester-Pays
	Requester PayerType = "requester"
)
// FetchPolicyStatusType defines type of fetch policy status
type FetchPolicyStatusType string

const (
	// FetchStatusOpen type of status: open
	FetchStatusOpen FetchPolicyStatusType = "open"
	// FetchStatusClosed type of status: closed
	FetchStatusClosed FetchPolicyStatusType = "closed"
)

// AvailableZoneType defines type of az redundancy
type AvailableZoneType string

const (
	// AvailableZoneMultiAz az redundancy: 3az (multi-AZ bucket)
	AvailableZoneMultiAz AvailableZoneType = "3az"
)

// FSStatusType defines type of file system status
type FSStatusType string

const (
	// FSStatusEnabled file system status: Enabled
	FSStatusEnabled FSStatusType = "Enabled"
	// FSStatusDisabled file system status: Disabled
	FSStatusDisabled FSStatusType = "Disabled"
)

// BucketType defines type of bucket
type BucketType string

const (
	// OBJECT bucket type: plain object bucket
	OBJECT BucketType = "OBJECT"
	// POSIX bucket type: file-system (POSIX) bucket
	POSIX BucketType = "POSIX"
)

File diff suppressed because it is too large Load Diff

@ -1,34 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
"fmt"
)
// ObsError defines error response from OBS
// ObsError defines error response from OBS
type ObsError struct {
	BaseModel
	// Status holds the HTTP status of the failed response
	// (populated by the response parser — not set in this file).
	Status  string
	XMLName xml.Name `xml:"Error"`
	// Code is the service error code parsed from the XML error body.
	Code string `xml:"Code" json:"code"`
	// Message is the human-readable error text from the service.
	Message string `xml:"Message" json:"message"`
	// Resource names the bucket/object the failed request addressed.
	Resource string `xml:"Resource"`
	// HostId identifies the server that produced the error.
	HostId string `xml:"HostId"`
}
// Error implements the error interface, flattening the HTTP status, service
// error code, message and request ID into a single diagnostic line.
func (err ObsError) Error() string {
	return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
		err.Status, err.Code, err.Message, err.RequestId)
}

@ -1,41 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"fmt"
"strconv"
"strings"
)
// extensionOptions is the open-ended marker type accepted by client APIs for
// optional per-request extensions; concrete values are recognized via type
// assertion (currently only extensionHeaders is handled).
type extensionOptions interface{}

// extensionHeaders mutates the outgoing request headers before signing;
// isObs selects the OBS header dialect over the S3-compatible one.
type extensionHeaders func(headers map[string][]string, isObs bool) error
// setHeaderPrefix builds an extensionHeaders callback that writes a single
// key/value header into the request, rejecting blank values with an error.
func setHeaderPrefix(key string, value string) extensionHeaders {
	return func(headers map[string][]string, isObs bool) error {
		if strings.TrimSpace(value) != "" {
			setHeaders(headers, key, []string{value}, isObs)
			return nil
		}
		return fmt.Errorf("set header %s with empty value", key)
	}
}
// WithReqPaymentHeader sets header for requester-pays
func WithReqPaymentHeader(requester PayerType) extensionHeaders {
	return setHeaderPrefix(REQUEST_PAYER, string(requester))
}

// WithTrafficLimitHeader sets the traffic-limit header to the decimal form
// of trafficLimit (unit per the OBS API — not visible here, TODO confirm).
func WithTrafficLimitHeader(trafficLimit int64) extensionHeaders {
	return setHeaderPrefix(TRAFFIC_LIMIT, strconv.FormatInt(trafficLimit, 10))
}

@ -1,638 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
)
// prepareHeaders normalizes user-supplied headers before signing/sending.
// A key that is a recognized request header (lower-cased lookup) or already
// carries a vendor prefix is passed through with its original casing; any
// other key is treated as user metadata and — only when meta is true — is
// lower-cased and prefixed with the x-amz-meta-/x-obs-meta- dialect selected
// by isObs. Blank keys, and non-metadata keys when meta is false, are dropped.
func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string {
	_headers := make(map[string][]string, len(headers))
	// Ranging over a nil map is a no-op, so the original explicit nil check
	// was redundant and has been removed.
	for key, value := range headers {
		key = strings.TrimSpace(key)
		if key == "" {
			continue
		}
		_key := strings.ToLower(key)
		// NOTE: the prefix test intentionally uses the original-cased key,
		// matching the previous behavior exactly.
		if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; ok || strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) {
			_key = key
		} else {
			if !meta {
				continue
			}
			if isObs {
				_key = HEADER_PREFIX_META_OBS + _key
			} else {
				_key = HEADER_PREFIX_META + _key
			}
		}
		_headers[_key] = value
	}
	return _headers
}
// doActionWithoutBucket runs a service-level request (no bucket/key), with XML
// result parsing and retries enabled.
func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	return obsClient.doAction(action, method, "", "", input, output, true, true, extensions)
}

// doActionWithBucketV2 runs a bucket-level request without XML result parsing
// (xmlResult=false); bucket may be empty only in cname (custom-domain) mode.
func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
		return errors.New("Bucket is empty")
	}
	return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions)
}

// doActionWithBucket runs a bucket-level request with XML result parsing;
// bucket may be empty only in cname mode.
func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
		return errors.New("Bucket is empty")
	}
	return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions)
}

// doActionWithBucketAndKey runs an object-level request with retries enabled.
func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions)
}

// doActionWithBucketAndKeyV2 runs an object-level request without XML result
// parsing (xmlResult=false), validating bucket (unless cname) and key.
func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
		return errors.New("Bucket is empty")
	}
	if strings.TrimSpace(objectKey) == "" {
		return errors.New("Key is empty")
	}
	return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions)
}

// doActionWithBucketAndKeyUnRepeatable runs an object-level request whose body
// cannot be replayed, so retries are disabled (repeatable=false).
func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions)
}

// _doActionWithBucketAndKey validates bucket (unless cname) and key, then
// dispatches to doAction with XML result parsing enabled.
func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error {
	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
		return errors.New("Bucket is empty")
	}
	if strings.TrimSpace(objectKey) == "" {
		return errors.New("Key is empty")
	}
	return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions)
}
// doAction is the common request pipeline: serialize the input into
// params/headers/body, apply extension headers, dispatch by HTTP method, and
// parse the response into output (as XML when xmlResult is true).
// repeatable marks whether the request body can be replayed on retry.
func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error {
	var resp *http.Response
	var respError error
	doLog(LEVEL_INFO, "Enter method %s...", action)
	start := GetCurrentTimestamp()
	// The signature mode decides which header dialect (x-obs- vs x-amz-) the
	// input serializer emits.
	params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs)
	if err != nil {
		return err
	}
	if params == nil {
		params = make(map[string]string)
	}
	if headers == nil {
		headers = make(map[string][]string)
	}
	for _, extension := range extensions {
		if extensionHeader, ok := extension.(extensionHeaders); ok {
			_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
			if _err != nil {
				// Extension failures are logged but deliberately non-fatal.
				doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
			}
		} else {
			doLog(LEVEL_INFO, "Unsupported extensionOptions")
		}
	}
	switch method {
	case HTTP_GET:
		resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable)
	case HTTP_POST:
		resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable)
	case HTTP_PUT:
		resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable)
	case HTTP_DELETE:
		resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable)
	case HTTP_HEAD:
		resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable)
	case HTTP_OPTIONS:
		resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable)
	default:
		respError = errors.New("Unexpect http method error")
	}
	if respError == nil && output != nil {
		respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
		if respError != nil {
			doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
		}
	} else {
		doLog(LEVEL_WARN, "Do http request with error: %v", respError)
	}
	if isDebugLogEnabled() {
		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
	}
	return respError
}
// Per-method delegates to doHTTP. GET/HEAD/OPTIONS/DELETE prepare headers
// with meta=false (unrecognized headers are dropped), while PUT/POST use
// meta=true so extra headers become x-amz-meta-/x-obs-meta- user metadata.

// doHTTPGet issues a GET request.
func (obsClient ObsClient) doHTTPGet(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

// doHTTPHead issues a HEAD request.
func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

// doHTTPOptions issues an OPTIONS request.
func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

// doHTTPDelete issues a DELETE request.
func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

// doHTTPPut issues a PUT request (meta=true: extra headers become metadata).
func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
}

// doHTTPPost issues a POST request (meta=true: extra headers become metadata).
func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
	return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
}
// prepareAgentHeader picks the User-Agent value to send: the caller-supplied
// agent when non-empty, otherwise the SDK default USER_AGENT.
func prepareAgentHeader(clientUserAgent string) string {
	if clientUserAgent != "" {
		return clientUserAgent
	}
	return USER_AGENT
}
// getSignedURLResponse post-processes the response of a signed-URL request:
// transport errors pass through; status >= 300 is converted to an ObsError;
// otherwise the body is parsed into output (XML when xmlResult is true).
// start is the request start timestamp (ms) used only for debug timing.
func (obsClient ObsClient) getSignedURLResponse(action string, output IBaseModel, xmlResult bool, resp *http.Response, err error, start int64) (respError error) {
	var msg interface{}
	if err != nil {
		respError = err
		resp = nil
	} else {
		doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
		if resp.StatusCode >= 300 {
			respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
			msg = resp.Status
			resp = nil
		} else {
			if output != nil {
				respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
			}
			if respError != nil {
				doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
			}
		}
	}
	// msg is only set on HTTP-level failure; transport errors were already
	// captured in respError above.
	if msg != nil {
		doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
	}
	if isDebugLogEnabled() {
		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
	}
	return
}
// doHTTPWithSignedURL sends a pre-signed-URL request. It masks any security
// token found in the query string before logging, promotes Host and
// Content-Length from the supplied headers into the request struct, forces
// the User-Agent, and delegates response handling to getSignedURLResponse.
func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
	req, err := http.NewRequest(method, signedURL, data)
	if err != nil {
		return err
	}
	if obsClient.conf.ctx != nil {
		req = req.WithContext(obsClient.conf.ctx)
	}
	var resp *http.Response
	var isSecurityToken bool
	var securityToken string
	var query []string
	parmas := strings.Split(signedURL, "?")
	if len(parmas) > 1 {
		query = strings.Split(parmas[1], "&")
		for _, value := range query {
			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
				// NOTE(review): both branches slice by len(HEADER_STS_TOKEN_AMZ);
				// this is only correct while the AMZ and OBS token parameter
				// names have identical length — confirm before renaming either.
				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
					securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
					isSecurityToken = true
				}
			}
		}
	}
	// Never log the raw security token.
	logSignedURL := signedURL
	if isSecurityToken {
		logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
	}
	doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)
	req.Header = actualSignedRequestHeaders
	// Host and Content-Length live in dedicated Request fields, not Header.
	if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
		req.Host = value[0]
		delete(req.Header, HEADER_HOST_CAMEL)
	} else if value, ok := req.Header[HEADER_HOST]; ok {
		req.Host = value[0]
		delete(req.Header, HEADER_HOST)
	}
	if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
		req.ContentLength = StringToInt64(value[0], -1)
		delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
	} else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
		req.ContentLength = StringToInt64(value[0], -1)
		delete(req.Header, HEADER_CONTENT_LENGTH)
	}
	userAgent := prepareAgentHeader(obsClient.conf.userAgent)
	req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}
	start := GetCurrentTimestamp()
	resp, err = obsClient.httpClient.Do(req)
	if isInfoLogEnabled() {
		doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
	}
	respError = obsClient.getSignedURLResponse(action, output, xmlResult, resp, err, start)
	return
}
// prepareData converts the request payload into an io.Reader.
// Strings and byte slices additionally get an explicit Content-Length header
// (written into the supplied headers map) so the request carries the exact
// body size; an io.Reader is passed through untouched. Any other non-nil
// type is rejected. A nil payload yields a nil reader with no error.
func prepareData(headers map[string][]string, data interface{}) (io.Reader, error) {
	if data == nil {
		return nil, nil
	}
	// Case order matters: string and []byte must be matched before the
	// generic io.Reader case so their lengths are recorded.
	switch payload := data.(type) {
	case string:
		doLog(LEVEL_DEBUG, "Do http request with string: %s", payload)
		headers["Content-Length"] = []string{IntToString(len(payload))}
		return strings.NewReader(payload), nil
	case []byte:
		doLog(LEVEL_DEBUG, "Do http request with byte array")
		headers["Content-Length"] = []string{IntToString(len(payload))}
		return bytes.NewReader(payload), nil
	case io.Reader:
		return payload, nil
	default:
		doLog(LEVEL_WARN, "Data is not a valid io.Reader")
		return nil, errors.New("Data is not a valid io.Reader")
	}
}
// getRequest builds the signed *http.Request for one attempt. When a
// redirectURL is present it is used as the target; unless redirectFlag is set
// (302+GET case) the request is re-signed against the redirect host, and any
// query string produced by signing is carried over onto the redirect URL.
// Otherwise the normal endpoint is signed via doAuth.
func (obsClient ObsClient) getRequest(redirectURL, requestURL string, redirectFlag bool, _data io.Reader, method,
	bucketName, objectKey string, params map[string]string, headers map[string][]string) (*http.Request, error) {
	if redirectURL != "" {
		if !redirectFlag {
			parsedRedirectURL, err := url.Parse(redirectURL)
			if err != nil {
				return nil, err
			}
			// Re-sign against the redirect target's host.
			requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host)
			if err != nil {
				return nil, err
			}
			if parsedRequestURL, err := url.Parse(requestURL); err != nil {
				return nil, err
			} else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" {
				redirectURL += "?" + parsedRequestURL.RawQuery
			}
		}
		requestURL = redirectURL
	} else {
		var err error
		requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
		if err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(method, requestURL, _data)
	if err != nil {
		return nil, err
	}
	// Attach the client context only after the error check: on failure req is
	// nil, and the original code called req.WithContext before inspecting err,
	// which would panic on a malformed URL.
	if obsClient.conf.ctx != nil {
		req = req.WithContext(obsClient.conf.ctx)
	}
	doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
	return req, nil
}
// logHeaders debug-logs the request headers with credentials masked: the
// Authorization header is removed for the log call and any STS security token
// is replaced by "******", then both are restored afterwards (the map is the
// live header set used for the actual request, so restoration must be exact).
//
// Fix: the token is now restored to the same header key it was masked under.
// The original chose the restore key from the signature type, so when the
// token's header dialect did not match the signature mode the real request
// was left carrying "******" as its token.
func logHeaders(headers map[string][]string, signature SignatureType) {
	if !isDebugLogEnabled() {
		return
	}
	auth := headers[HEADER_AUTH_CAMEL]
	delete(headers, HEADER_AUTH_CAMEL)
	// Remember exactly which key was masked so it can be restored precisely.
	maskedKey := ""
	var securityToken []string
	if v, ok := headers[HEADER_STS_TOKEN_AMZ]; ok {
		securityToken = v
		maskedKey = HEADER_STS_TOKEN_AMZ
		headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
	} else if v, ok := headers[HEADER_STS_TOKEN_OBS]; ok {
		securityToken = v
		maskedKey = HEADER_STS_TOKEN_OBS
		headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
	}
	doLog(LEVEL_DEBUG, "Request headers: %v", headers)
	headers[HEADER_AUTH_CAMEL] = auth
	if maskedKey != "" {
		headers[maskedKey] = securityToken
	}
}
// prepareReq applies the prepared headers to req, moving Host and
// Content-Length out of the header map into the dedicated request fields,
// sets the User-Agent, and returns the request to be remembered as the
// "lastRequest" for the next retry attempt.
//
// Host/Content-Length are deleted from the shared headers map the first time
// through, so on a retry they can only be recovered from the previous
// attempt's request; they are therefore inherited from lastRequest first and
// then overridden by this attempt's headers if present. The original code
// assigned lastRequest = req before its nil check, turning the restore into a
// self-copy and losing both values on every retry.
func prepareReq(headers map[string][]string, req, lastRequest *http.Request, clientUserAgent string) *http.Request {
	if lastRequest != nil {
		req.Host = lastRequest.Host
		req.ContentLength = lastRequest.ContentLength
	}
	for key, value := range headers {
		switch key {
		case HEADER_HOST_CAMEL:
			req.Host = value[0]
			delete(headers, key)
		case HEADER_CONTENT_LENGTH_CAMEL:
			req.ContentLength = StringToInt64(value[0], -1)
			delete(headers, key)
		default:
			req.Header[key] = value
		}
	}
	userAgent := prepareAgentHeader(clientUserAgent)
	req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}
	return req
}
// canNotRetry reports whether a failed request must not be retried: the
// caller marked it non-repeatable, or the response status is a client error
// (4xx) or 304 Not Modified — outcomes a retry cannot improve.
func canNotRetry(repeatable bool, statusCode int) bool {
	switch {
	case !repeatable:
		return true
	case statusCode == 304:
		return true
	case statusCode >= 400 && statusCode < 500:
		return true
	default:
		return false
	}
}
// isRedirectErr reports whether a redirect response can still be followed:
// the Location header is non-empty and the redirect budget is not exhausted.
func isRedirectErr(location string, redirectCount, maxRedirectCount int) bool {
	return location != "" && redirectCount < maxRedirectCount
}
// setRedirectFlag reports whether the client should enter plain-redirect mode:
// only a 302 answer to a GET request qualifies (other combinations are
// re-signed against the redirect host instead).
func setRedirectFlag(statusCode int, method string) (redirectFlag bool) {
	redirectFlag = statusCode == 302 && method == HTTP_GET
	return
}
// prepareRetry resets state between retry attempts: it closes and discards
// the previous response, drops the stale Authorization header (the next
// attempt re-signs), and rewinds the request body so it can be replayed.
// String/bytes readers are seeked back to 0; a fileReaderWrapper is replaced
// by a fresh wrapper over a newly opened file positioned at its mark; a
// readerWrapper is seeked and its read counter cleared.
// Returns the (possibly replaced) body reader and the nil-ed response.
func prepareRetry(resp *http.Response, headers map[string][]string, _data io.Reader, msg interface{}) (io.Reader, *http.Response, error) {
	if resp != nil {
		_err := resp.Body.Close()
		checkAndLogErr(_err, LEVEL_WARN, "Failed to close resp body")
		resp = nil
	}
	if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
		delete(headers, HEADER_AUTH_CAMEL)
	}
	doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
	if r, ok := _data.(*strings.Reader); ok {
		_, err := r.Seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
	} else if r, ok := _data.(*bytes.Reader); ok {
		_, err := r.Seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
	} else if r, ok := _data.(*fileReaderWrapper); ok {
		// Re-open the file rather than seeking the old handle: the previous
		// attempt may have partially consumed or closed it.
		fd, err := os.Open(r.filePath)
		if err != nil {
			return nil, nil, err
		}
		fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
		fileReaderWrapper.mark = r.mark
		fileReaderWrapper.reader = fd
		fileReaderWrapper.totalCount = r.totalCount
		_data = fileReaderWrapper
		_, err = fd.Seek(r.mark, 0)
		if err != nil {
			errMsg := fd.Close()
			checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
			return nil, nil, err
		}
	} else if r, ok := _data.(*readerWrapper); ok {
		_, err := r.seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
		r.readedCount = 0
	}
	return _data, resp, nil
}
// doHTTP is the transport loop: it signs and sends the request, retrying up
// to conf.maxRetryCount times with randomized linear backoff, and following
// up to conf.maxRedirectCount redirects (redirects extend the retry budget).
// Non-retryable failures (non-repeatable body, 4xx, 304) are converted to an
// ObsError immediately; on success the *http.Response is returned unparsed.
func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) {
	bucketName = strings.TrimSpace(bucketName)
	method = strings.ToUpper(method)
	var redirectURL string
	var requestURL string
	maxRetryCount := obsClient.conf.maxRetryCount
	maxRedirectCount := obsClient.conf.maxRedirectCount
	_data, _err := prepareData(headers, data)
	if _err != nil {
		return nil, _err
	}
	var lastRequest *http.Request
	redirectFlag := false
	for i, redirectCount := 0, 0; i <= maxRetryCount; i++ {
		req, err := obsClient.getRequest(redirectURL, requestURL, redirectFlag, _data,
			method, bucketName, objectKey, params, headers)
		if err != nil {
			return nil, err
		}
		logHeaders(headers, obsClient.conf.signature)
		lastRequest = prepareReq(headers, req, lastRequest, obsClient.conf.userAgent)
		start := GetCurrentTimestamp()
		resp, err = obsClient.httpClient.Do(req)
		doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
		var msg interface{}
		if err != nil {
			// Transport-level failure: retry only if the body is repeatable.
			msg = err
			respError = err
			resp = nil
			if !repeatable {
				break
			}
		} else {
			doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
			if resp.StatusCode < 300 {
				respError = nil
				break
			} else if canNotRetry(repeatable, resp.StatusCode) {
				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
				resp = nil
				break
			} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
				location := resp.Header.Get(HEADER_LOCATION_CAMEL)
				if isRedirectErr(location, redirectCount, maxRedirectCount) {
					redirectURL = location
					doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
					msg = resp.Status
					// A redirect consumes a loop iteration but not a retry.
					maxRetryCount++
					redirectCount++
					redirectFlag = setRedirectFlag(resp.StatusCode, method)
				} else {
					respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
					resp = nil
					break
				}
			} else {
				// 5xx: fall through to the retry path below.
				msg = resp.Status
			}
		}
		if i != maxRetryCount {
			_data, resp, err = prepareRetry(resp, headers, _data, msg)
			if err != nil {
				return nil, err
			}
			// NOTE(review): this defer is inside the loop, so re-opened retry
			// files stay open until doHTTP returns — bounded by maxRetryCount,
			// but worth confirming that is intended.
			if r, ok := _data.(*fileReaderWrapper); ok {
				if _fd, _ok := r.reader.(*os.File); _ok {
					defer func() {
						errMsg := _fd.Close()
						checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
					}()
				}
			}
			// Randomized backoff that grows with the attempt number.
			time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
		} else {
			doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
			if resp != nil {
				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
				resp = nil
			}
		}
	}
	return
}
// connDelegate wraps a net.Conn to enforce a per-operation socket timeout:
// each Read/Write sets a short deadline for itself and then relaxes it to a
// longer final deadline once the operation completes.
type connDelegate struct {
	conn net.Conn
	// socketTimeout is the deadline applied to each individual read/write.
	socketTimeout time.Duration
	// finalTimeout is the relaxed deadline re-armed after each operation.
	finalTimeout time.Duration
}

// getConnDelegate wraps conn, converting the timeouts from seconds.
func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
	return &connDelegate{
		conn:          conn,
		socketTimeout: time.Second * time.Duration(socketTimeout),
		finalTimeout:  time.Second * time.Duration(finalTimeout),
	}
}
// Read applies the per-operation socket deadline, performs the read, then
// relaxes the deadline to finalTimeout. Deadline-set failures are logged at
// debug level but never fail the read itself.
func (delegate *connDelegate) Read(b []byte) (n int, err error) {
	setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
	flag := isDebugLogEnabled()
	if setReadDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
	}
	n, err = delegate.conn.Read(b)
	setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
	if setReadDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
	}
	return n, err
}
// Write applies the per-operation socket deadline, performs the write, then
// relaxes both the write and read deadlines to finalTimeout (the read side is
// re-armed too so a following response read starts from the relaxed window).
// Deadline-set failures are logged at debug level but never fail the write.
func (delegate *connDelegate) Write(b []byte) (n int, err error) {
	setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
	flag := isDebugLogEnabled()
	if setWriteDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
	}
	n, err = delegate.conn.Write(b)
	finalTimeout := time.Now().Add(delegate.finalTimeout)
	setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout)
	if setWriteDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
	}
	setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout)
	if setReadDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
	}
	return n, err
}
// The remaining net.Conn methods delegate directly to the wrapped connection.

// Close closes the underlying connection.
func (delegate *connDelegate) Close() error {
	return delegate.conn.Close()
}

// LocalAddr returns the local network address of the underlying connection.
func (delegate *connDelegate) LocalAddr() net.Addr {
	return delegate.conn.LocalAddr()
}

// RemoteAddr returns the remote network address of the underlying connection.
func (delegate *connDelegate) RemoteAddr() net.Addr {
	return delegate.conn.RemoteAddr()
}

// SetDeadline sets both read and write deadlines on the underlying connection.
func (delegate *connDelegate) SetDeadline(t time.Time) error {
	return delegate.conn.SetDeadline(t)
}

// SetReadDeadline sets the read deadline on the underlying connection.
func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
	return delegate.conn.SetReadDeadline(t)
}

// SetWriteDeadline sets the write deadline on the underlying connection.
func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
	return delegate.conn.SetWriteDeadline(t)
}

@ -1,334 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
)
// Level defines the level of the log
type Level int

// Log levels in descending severity; a record is emitted only when its level
// is >= the configured logConf.level.
const (
	LEVEL_OFF   Level = 500 // disables all logging
	LEVEL_ERROR Level = 400
	LEVEL_WARN  Level = 300
	LEVEL_INFO  Level = 200
	LEVEL_DEBUG Level = 100
)
// logLevelMap maps each Level to the tag prepended to emitted log records.
var logLevelMap = map[Level]string{
	LEVEL_OFF:   "[OFF]: ",
	LEVEL_ERROR: "[ERROR]: ",
	LEVEL_WARN:  "[WARN]: ",
	LEVEL_INFO:  "[INFO]: ",
	LEVEL_DEBUG: "[DEBUG]: ",
}
// logConfType holds the runtime logging configuration.
type logConfType struct {
	level        Level  // minimum level that will be emitted
	logToConsole bool   // whether records are mirrored to stdout
	logFullPath  string // absolute log file path; "" means no file logging
	maxLogSize   int64  // rotation threshold in bytes
	backups      int    // rotation index wraps back to 1 past this count
}
// getDefaultLogConf returns the default logging configuration:
// WARN level, no console output, no file path, 30 MB rotation size,
// and 10 rotated backups.
func getDefaultLogConf() logConfType {
	var conf logConfType
	conf.level = LEVEL_WARN
	conf.logToConsole = false
	conf.logFullPath = ""
	conf.maxLogSize = 30 * 1024 * 1024 // 30MB
	conf.backups = 10
	return conf
}
var logConf logConfType
// loggerWrapper writes log records to a file through a buffering background
// goroutine, rotating the file when it grows past logConf.maxLogSize.
type loggerWrapper struct {
	fullPath   string         // absolute path of the active log file
	fd         *os.File       // handle of the active log file
	ch         chan string    // queue feeding the background writer goroutine
	wg         sync.WaitGroup // tracks the background writer goroutine
	queue      []string       // records batched since the last flush
	logger     *log.Logger    // writes to fd with no prefix/flags
	index      int            // next rotation suffix; wraps after logConf.backups
	cacheCount int            // flush batch size and channel capacity
	closed     bool           // set by doClose; Printf drops messages afterwards
}
// doInit allocates the batch queue, channel and file-backed logger, then
// starts the background writer goroutine tracked by wg.
func (lw *loggerWrapper) doInit() {
	lw.queue = make([]string, 0, lw.cacheCount)
	lw.ch = make(chan string, lw.cacheCount)
	lw.logger = log.New(lw.fd, "", 0)
	lw.wg.Add(1)
	go lw.doWrite()
}
// rotate checks the size of the current log file and, once it reaches
// logConf.maxLogSize, syncs and closes it, renames it to fullPath.<index>
// (the index wraps after logConf.backups), and reopens a fresh file at
// the same path. Unrecoverable file-system errors are raised as panics.
func (lw *loggerWrapper) rotate() {
	stat, err := lw.fd.Stat()
	if err != nil {
		_err := lw.fd.Close()
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
		}
		panic(err)
	}
	if stat.Size() >= logConf.maxLogSize {
		_err := lw.fd.Sync()
		if _err != nil {
			// BUG FIX: previously panic(err), but err is always nil on this
			// path, so the real Sync failure was discarded and the process
			// panicked with nil.
			panic(_err)
		}
		_err = lw.fd.Close()
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
		}
		if lw.index > logConf.backups {
			lw.index = 1
		}
		_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
		if _err != nil {
			// BUG FIX: same as above — panic with the actual Rename error,
			// not the nil outer err.
			panic(_err)
		}
		lw.index++
		fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			panic(err)
		}
		lw.fd = fd
		lw.logger.SetOutput(lw.fd)
	}
}
// doFlush rotates the log file if needed, writes every queued record, and
// syncs the file to disk; a sync failure panics.
func (lw *loggerWrapper) doFlush() {
	lw.rotate()
	for _, record := range lw.queue {
		lw.logger.Println(record)
	}
	if err := lw.fd.Sync(); err != nil {
		panic(err)
	}
}
// doClose marks the wrapper closed, closes the message channel (which makes
// the writer goroutine flush and exit), and waits for it to finish.
// NOTE(review): closed is read by Printf without synchronization, so a
// Printf racing with doClose could still send on the closed channel and
// panic — confirm callers stop logging before CloseLog.
func (lw *loggerWrapper) doClose() {
	lw.closed = true
	close(lw.ch)
	lw.wg.Wait()
}
// doWrite is the background writer loop: it drains the message channel,
// batching up to cacheCount records between flushes. When the channel is
// closed it performs a final flush and closes the log file.
func (lw *loggerWrapper) doWrite() {
	defer lw.wg.Done()
	for msg := range lw.ch {
		if len(lw.queue) >= lw.cacheCount {
			lw.doFlush()
			lw.queue = make([]string, 0, lw.cacheCount)
		}
		lw.queue = append(lw.queue, msg)
	}
	// Channel closed: flush whatever remains and release the file handle.
	lw.doFlush()
	if closeErr := lw.fd.Close(); closeErr != nil {
		doLog(LEVEL_WARN, "Failed to close file with reason: %v", closeErr)
	}
}
// Printf formats the record and queues it for the background writer.
// Records are dropped once the wrapper has been closed.
// NOTE(review): the closed flag is checked without synchronization; a call
// racing with doClose may send on a closed channel — verify callers
// serialize logging against CloseLog.
func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
	if !lw.closed {
		msg := fmt.Sprintf(format, v...)
		lw.ch <- msg
	}
}
// consoleLogger mirrors log records to stdout when console logging is enabled.
var consoleLogger *log.Logger

// fileLogger batches log records and writes them to the configured file.
var fileLogger *loggerWrapper

// lock serializes InitLog*/CloseLog against each other.
var lock = new(sync.RWMutex)
// isDebugLogEnabled reports whether DEBUG records would be emitted.
func isDebugLogEnabled() bool {
	return logConf.level <= LEVEL_DEBUG
}

// isErrorLogEnabled reports whether ERROR records would be emitted.
func isErrorLogEnabled() bool {
	return logConf.level <= LEVEL_ERROR
}

// isWarnLogEnabled reports whether WARN records would be emitted.
func isWarnLogEnabled() bool {
	return logConf.level <= LEVEL_WARN
}

// isInfoLogEnabled reports whether INFO records would be emitted.
func isInfoLogEnabled() bool {
	return logConf.level <= LEVEL_INFO
}
// reset stops the file logger (flushing pending records), clears the console
// logger and restores the default configuration. Callers must hold lock.
func reset() {
	if fileLogger != nil {
		fileLogger.doClose()
		fileLogger = nil
	}
	consoleLogger = nil
	logConf = getDefaultLogConf()
}
// InitLog enable logging function with default cacheCnt.
// It is equivalent to InitLogWithCacheCnt with a cache count of 50.
func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error {
	return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50)
}
// InitLogWithCacheCnt enable logging function.
// It resets any previous logging state, then (when logFullPath is non-blank)
// opens the log file (appending ".log" if missing), scans the log directory
// to resume the rotation index from existing backups, and starts the
// buffered file logger with cacheCnt queued records. maxLogSize and backups
// override the defaults only when positive; logToConsole additionally
// mirrors records to stdout. Returns an error if the file or directory
// cannot be prepared.
func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error {
	lock.Lock()
	defer lock.Unlock()
	if cacheCnt <= 0 {
		cacheCnt = 50 // fall back to the default batch size
	}
	// Drop any previously configured loggers before re-initializing.
	reset()
	if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
		_fullPath, err := filepath.Abs(fullPath)
		if err != nil {
			return err
		}
		if !strings.HasSuffix(_fullPath, ".log") {
			_fullPath += ".log"
		}
		stat, fd, err := initLogFile(_fullPath)
		if err != nil {
			return err
		}
		// Resume rotation numbering from the newest existing "<name>.<i>"
		// backup in the log directory.
		prefix := stat.Name() + "."
		index := 1
		var timeIndex int64 = 0
		walkFunc := func(path string, info os.FileInfo, err error) error {
			if err == nil {
				if name := info.Name(); strings.HasPrefix(name, prefix) {
					if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex {
						timeIndex = info.ModTime().Unix()
						index = i + 1
					}
				}
			}
			return err
		}
		if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return err
		}
		fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false}
		fileLogger.doInit()
	}
	if maxLogSize > 0 {
		logConf.maxLogSize = maxLogSize
	}
	if backups > 0 {
		logConf.backups = backups
	}
	logConf.level = level
	if logToConsole {
		consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
	}
	return nil
}
// initLogFile ensures the parent directory of _fullPath exists and opens
// (creating if necessary) the log file for appending. It returns the file's
// stat info and the open handle; the caller owns closing fd. It fails if
// _fullPath already exists as a directory.
func initLogFile(_fullPath string) (os.FileInfo, *os.File, error) {
	stat, err := os.Stat(_fullPath)
	if err == nil && stat.IsDir() {
		return nil, nil, fmt.Errorf("logFullPath:[%s] is a directory", _fullPath)
	} else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
		return nil, nil, err
	}
	fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		// BUG FIX: fd is nil when OpenFile fails; the previous code closed it
		// anyway, which returns os.ErrInvalid and logged a spurious
		// "Failed to close file" warning on every open failure.
		return nil, nil, err
	}
	if stat == nil {
		// The initial Stat failed because the file did not exist; stat the
		// file we just created so the caller gets its real info.
		stat, err = os.Stat(_fullPath)
		if err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return nil, nil, err
		}
	}
	return stat, fd, nil
}
// CloseLog disable logging and synchronize cache data to log files.
// NOTE(review): logEnabled is checked before taking lock, so a concurrent
// InitLog could race with this check — confirm init/close are serialized by
// callers.
func CloseLog() {
	if logEnabled() {
		lock.Lock()
		defer lock.Unlock()
		reset()
	}
}
// logEnabled reports whether at least one log sink (console or file) is
// currently configured.
func logEnabled() bool {
	return consoleLogger != nil || fileLogger != nil
}
// DoLog writes log messages to the logger.
// It is the exported wrapper around doLog; note the caller file/line
// recorded in the message will be this wrapper's call site inside doLog.
func DoLog(level Level, format string, v ...interface{}) {
	doLog(level, format, v...)
}
// doLog formats one log record, prefixes it with the immediate caller's file
// name and line number plus the level tag, and emits it to whichever sinks
// (console, file) are configured. Records below the configured level, or
// with no sinks configured, are dropped.
func doLog(level Level, format string, v ...interface{}) {
	if !logEnabled() || logConf.level > level {
		return
	}
	msg := fmt.Sprintf(format, v...)
	// Depth 1 resolves to doLog's direct caller.
	if _, file, line, ok := runtime.Caller(1); ok {
		if idx := strings.LastIndex(file, "/"); idx >= 0 {
			file = file[idx+1:]
		}
		msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
	}
	prefix := logLevelMap[level]
	if consoleLogger != nil {
		consoleLogger.Printf("%s%s", prefix, msg)
	}
	if fileLogger != nil {
		nowDate := FormatUtcNow("2006-01-02T15:04:05Z")
		fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
	}
}
// checkAndLogErr emits the formatted message at the given level, but only
// when err is non-nil; the error value itself is not appended to the record.
func checkAndLogErr(err error, level Level, format string, v ...interface{}) {
	if err == nil {
		return
	}
	doLog(level, format, v...)
}

@ -1,363 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
"time"
)
// Bucket defines bucket properties
type Bucket struct {
XMLName xml.Name `xml:"Bucket"`
Name string `xml:"Name"`
CreationDate time.Time `xml:"CreationDate"`
Location string `xml:"Location"`
BucketType string `xml:"BucketType,omitempty"`
}
// Owner defines owner properties
type Owner struct {
XMLName xml.Name `xml:"Owner"`
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName,omitempty"`
}
// Initiator defines initiator properties
type Initiator struct {
XMLName xml.Name `xml:"Initiator"`
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName,omitempty"`
}
// bucketLocationObs is the OBS-protocol XML form of a bucket location
// response, where the location is the element's character data.
type bucketLocationObs struct {
	XMLName  xml.Name `xml:"Location"`
	Location string   `xml:",chardata"`
}
// BucketLocation defines bucket location configuration
type BucketLocation struct {
XMLName xml.Name `xml:"CreateBucketConfiguration"`
Location string `xml:"LocationConstraint,omitempty"`
}
// BucketStoragePolicy defines the bucket storage class
type BucketStoragePolicy struct {
XMLName xml.Name `xml:"StoragePolicy"`
StorageClass StorageClassType `xml:"DefaultStorageClass"`
}
// bucketStoragePolicyObs is the OBS-protocol XML form of the bucket default
// storage class, carried as the element's character data.
type bucketStoragePolicyObs struct {
	XMLName      xml.Name `xml:"StorageClass"`
	StorageClass string   `xml:",chardata"`
}
// Content defines the object content properties
type Content struct {
XMLName xml.Name `xml:"Contents"`
Owner Owner `xml:"Owner"`
ETag string `xml:"ETag"`
Key string `xml:"Key"`
LastModified time.Time `xml:"LastModified"`
Size int64 `xml:"Size"`
StorageClass StorageClassType `xml:"StorageClass"`
}
// Version defines the properties of versioning objects
type Version struct {
DeleteMarker
XMLName xml.Name `xml:"Version"`
ETag string `xml:"ETag"`
Size int64 `xml:"Size"`
}
// DeleteMarker defines the properties of versioning delete markers
type DeleteMarker struct {
XMLName xml.Name `xml:"DeleteMarker"`
Key string `xml:"Key"`
VersionId string `xml:"VersionId"`
IsLatest bool `xml:"IsLatest"`
LastModified time.Time `xml:"LastModified"`
Owner Owner `xml:"Owner"`
StorageClass StorageClassType `xml:"StorageClass"`
}
// Upload defines multipart upload properties
type Upload struct {
XMLName xml.Name `xml:"Upload"`
Key string `xml:"Key"`
UploadId string `xml:"UploadId"`
Initiated time.Time `xml:"Initiated"`
StorageClass StorageClassType `xml:"StorageClass"`
Owner Owner `xml:"Owner"`
Initiator Initiator `xml:"Initiator"`
}
// BucketQuota defines bucket quota configuration
type BucketQuota struct {
XMLName xml.Name `xml:"Quota"`
Quota int64 `xml:"StorageQuota"`
}
// Grantee defines grantee properties
type Grantee struct {
XMLName xml.Name `xml:"Grantee"`
Type GranteeType `xml:"type,attr"`
ID string `xml:"ID,omitempty"`
DisplayName string `xml:"DisplayName,omitempty"`
URI GroupUriType `xml:"URI,omitempty"`
}
// granteeObs is the OBS-protocol XML form of a grantee, which uses a Canned
// group element instead of the S3-style group URI.
type granteeObs struct {
	XMLName     xml.Name    `xml:"Grantee"`
	Type        GranteeType `xml:"type,attr"`
	ID          string      `xml:"ID,omitempty"`
	DisplayName string      `xml:"DisplayName,omitempty"`
	Canned      string      `xml:"Canned,omitempty"`
}
// Grant defines grant properties
type Grant struct {
XMLName xml.Name `xml:"Grant"`
Grantee Grantee `xml:"Grantee"`
Permission PermissionType `xml:"Permission"`
Delivered bool `xml:"Delivered"`
}
// grantObs is the OBS-protocol XML form of a grant, pairing a granteeObs
// with its permission.
type grantObs struct {
	XMLName    xml.Name       `xml:"Grant"`
	Grantee    granteeObs     `xml:"Grantee"`
	Permission PermissionType `xml:"Permission"`
	Delivered  bool           `xml:"Delivered"`
}
// AccessControlPolicy defines access control policy properties
type AccessControlPolicy struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
Owner Owner `xml:"Owner"`
Grants []Grant `xml:"AccessControlList>Grant"`
Delivered string `xml:"Delivered,omitempty"`
}
// accessControlPolicyObs is the OBS-protocol XML form of an access control
// policy, using grantObs entries and no top-level Delivered element.
type accessControlPolicyObs struct {
	XMLName xml.Name   `xml:"AccessControlPolicy"`
	Owner   Owner      `xml:"Owner"`
	Grants  []grantObs `xml:"AccessControlList>Grant"`
}
// CorsRule defines the CORS rules
type CorsRule struct {
XMLName xml.Name `xml:"CORSRule"`
ID string `xml:"ID,omitempty"`
AllowedOrigin []string `xml:"AllowedOrigin"`
AllowedMethod []string `xml:"AllowedMethod"`
AllowedHeader []string `xml:"AllowedHeader,omitempty"`
MaxAgeSeconds int `xml:"MaxAgeSeconds"`
ExposeHeader []string `xml:"ExposeHeader,omitempty"`
}
// BucketCors defines the bucket CORS configuration
type BucketCors struct {
XMLName xml.Name `xml:"CORSConfiguration"`
CorsRules []CorsRule `xml:"CORSRule"`
}
// BucketVersioningConfiguration defines the versioning configuration
type BucketVersioningConfiguration struct {
XMLName xml.Name `xml:"VersioningConfiguration"`
Status VersioningStatusType `xml:"Status"`
}
// IndexDocument defines the default page configuration
type IndexDocument struct {
Suffix string `xml:"Suffix"`
}
// ErrorDocument defines the error page configuration
type ErrorDocument struct {
Key string `xml:"Key,omitempty"`
}
// Condition defines condition in RoutingRule
type Condition struct {
XMLName xml.Name `xml:"Condition"`
KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"`
HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"`
}
// Redirect defines redirect in RoutingRule
type Redirect struct {
XMLName xml.Name `xml:"Redirect"`
Protocol ProtocolType `xml:"Protocol,omitempty"`
HostName string `xml:"HostName,omitempty"`
ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"`
ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"`
HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"`
}
// RoutingRule defines routing rules
type RoutingRule struct {
XMLName xml.Name `xml:"RoutingRule"`
Condition Condition `xml:"Condition,omitempty"`
Redirect Redirect `xml:"Redirect"`
}
// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration
type RedirectAllRequestsTo struct {
XMLName xml.Name `xml:"RedirectAllRequestsTo"`
Protocol ProtocolType `xml:"Protocol,omitempty"`
HostName string `xml:"HostName"`
}
// BucketWebsiteConfiguration defines the bucket website configuration
type BucketWebsiteConfiguration struct {
XMLName xml.Name `xml:"WebsiteConfiguration"`
RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
IndexDocument IndexDocument `xml:"IndexDocument,omitempty"`
ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"`
RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
}
// BucketLoggingStatus defines the bucket logging configuration
type BucketLoggingStatus struct {
XMLName xml.Name `xml:"BucketLoggingStatus"`
Agency string `xml:"Agency,omitempty"`
TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"`
TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"`
TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
}
// Transition defines transition property in LifecycleRule
type Transition struct {
XMLName xml.Name `xml:"Transition"`
Date time.Time `xml:"Date,omitempty"`
Days int `xml:"Days,omitempty"`
StorageClass StorageClassType `xml:"StorageClass"`
}
// Expiration defines expiration property in LifecycleRule
type Expiration struct {
XMLName xml.Name `xml:"Expiration"`
Date time.Time `xml:"Date,omitempty"`
Days int `xml:"Days,omitempty"`
}
// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule
type NoncurrentVersionTransition struct {
XMLName xml.Name `xml:"NoncurrentVersionTransition"`
NoncurrentDays int `xml:"NoncurrentDays"`
StorageClass StorageClassType `xml:"StorageClass"`
}
// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule
type NoncurrentVersionExpiration struct {
XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
NoncurrentDays int `xml:"NoncurrentDays"`
}
// LifecycleRule defines lifecycle rule
type LifecycleRule struct {
ID string `xml:"ID,omitempty"`
Prefix string `xml:"Prefix"`
Status RuleStatusType `xml:"Status"`
Transitions []Transition `xml:"Transition,omitempty"`
Expiration Expiration `xml:"Expiration,omitempty"`
NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
}
// BucketEncryptionConfiguration defines the bucket encryption configuration
type BucketEncryptionConfiguration struct {
XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
SSEAlgorithm string `xml:"Rule>ApplyServerSideEncryptionByDefault>SSEAlgorithm"`
KMSMasterKeyID string `xml:"Rule>ApplyServerSideEncryptionByDefault>KMSMasterKeyID,omitempty"`
ProjectID string `xml:"Rule>ApplyServerSideEncryptionByDefault>ProjectID,omitempty"`
}
// Tag defines tag property in BucketTagging
type Tag struct {
XMLName xml.Name `xml:"Tag"`
Key string `xml:"Key"`
Value string `xml:"Value"`
}
// BucketTagging defines the bucket tag configuration
type BucketTagging struct {
XMLName xml.Name `xml:"Tagging"`
Tags []Tag `xml:"TagSet>Tag"`
}
// FilterRule defines filter rule in TopicConfiguration
type FilterRule struct {
XMLName xml.Name `xml:"FilterRule"`
Name string `xml:"Name,omitempty"`
Value string `xml:"Value,omitempty"`
}
// TopicConfiguration defines the topic configuration
type TopicConfiguration struct {
XMLName xml.Name `xml:"TopicConfiguration"`
ID string `xml:"Id,omitempty"`
Topic string `xml:"Topic"`
Events []EventType `xml:"Event"`
FilterRules []FilterRule `xml:"Filter>Object>FilterRule"`
}
// BucketNotification defines the bucket notification configuration
type BucketNotification struct {
XMLName xml.Name `xml:"NotificationConfiguration"`
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"`
}
// topicConfigurationS3 is the S3-protocol XML form of a topic configuration,
// with string event names and filter rules nested under S3Key.
type topicConfigurationS3 struct {
	XMLName     xml.Name     `xml:"TopicConfiguration"`
	ID          string       `xml:"Id,omitempty"`
	Topic       string       `xml:"Topic"`
	Events      []string     `xml:"Event"`
	FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"`
}

// bucketNotificationS3 is the S3-protocol XML form of a bucket notification
// configuration, wrapping topicConfigurationS3 entries.
type bucketNotificationS3 struct {
	XMLName             xml.Name               `xml:"NotificationConfiguration"`
	TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"`
}
// ObjectToDelete defines the object property in DeleteObjectsInput
type ObjectToDelete struct {
XMLName xml.Name `xml:"Object"`
Key string `xml:"Key"`
VersionId string `xml:"VersionId,omitempty"`
}
// Deleted defines the deleted property in DeleteObjectsOutput
type Deleted struct {
XMLName xml.Name `xml:"Deleted"`
Key string `xml:"Key"`
VersionId string `xml:"VersionId"`
DeleteMarker bool `xml:"DeleteMarker"`
DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"`
}
// Part defines the part properties
type Part struct {
XMLName xml.Name `xml:"Part"`
PartNumber int `xml:"PartNumber"`
ETag string `xml:"ETag"`
LastModified time.Time `xml:"LastModified,omitempty"`
Size int64 `xml:"Size,omitempty"`
}
// BucketPayer defines the request payment configuration
type BucketPayer struct {
XMLName xml.Name `xml:"RequestPaymentConfiguration"`
Payer PayerType `xml:"Payer"`
}

@ -1,365 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
)
// ListBucketsInput is the input parameter of ListBuckets function
type ListBucketsInput struct {
QueryLocation bool
BucketType BucketType
}
// ListBucketsOutput is the result of ListBuckets function
type ListBucketsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Owner Owner `xml:"Owner"`
Buckets []Bucket `xml:"Buckets>Bucket"`
}
// CreateBucketInput is the input parameter of CreateBucket function
type CreateBucketInput struct {
BucketLocation
Bucket string `xml:"-"`
ACL AclType `xml:"-"`
StorageClass StorageClassType `xml:"-"`
GrantReadId string `xml:"-"`
GrantWriteId string `xml:"-"`
GrantReadAcpId string `xml:"-"`
GrantWriteAcpId string `xml:"-"`
GrantFullControlId string `xml:"-"`
GrantReadDeliveredId string `xml:"-"`
GrantFullControlDeliveredId string `xml:"-"`
Epid string `xml:"-"`
AvailableZone string `xml:"-"`
IsFSFileInterface bool `xml:"-"`
}
// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy function
type SetBucketStoragePolicyInput struct {
Bucket string `xml:"-"`
BucketStoragePolicy
}
// getBucketStoragePolicyOutputS3 is the S3-protocol response wrapper used to
// parse a GetBucketStoragePolicy reply.
type getBucketStoragePolicyOutputS3 struct {
	BaseModel
	BucketStoragePolicy
}
// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function
type GetBucketStoragePolicyOutput struct {
BaseModel
StorageClass string
}
// getBucketStoragePolicyOutputObs is the OBS-protocol response wrapper used
// to parse a GetBucketStoragePolicy reply.
type getBucketStoragePolicyOutputObs struct {
	BaseModel
	bucketStoragePolicyObs
}
// SetBucketQuotaInput is the input parameter of SetBucketQuota function
type SetBucketQuotaInput struct {
Bucket string `xml:"-"`
BucketQuota
}
// GetBucketQuotaOutput is the result of GetBucketQuota function
type GetBucketQuotaOutput struct {
BaseModel
BucketQuota
}
// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function
type GetBucketStorageInfoOutput struct {
BaseModel
XMLName xml.Name `xml:"GetBucketStorageInfoResult"`
Size int64 `xml:"Size"`
ObjectNumber int `xml:"ObjectNumber"`
}
// getBucketLocationOutputS3 is the S3-protocol response wrapper used to
// parse a GetBucketLocation reply.
type getBucketLocationOutputS3 struct {
	BaseModel
	BucketLocation
}

// getBucketLocationOutputObs is the OBS-protocol response wrapper used to
// parse a GetBucketLocation reply.
type getBucketLocationOutputObs struct {
	BaseModel
	bucketLocationObs
}
// GetBucketLocationOutput is the result of GetBucketLocation function
type GetBucketLocationOutput struct {
BaseModel
Location string `xml:"-"`
}
// GetBucketAclOutput is the result of GetBucketAcl function
type GetBucketAclOutput struct {
BaseModel
AccessControlPolicy
}
// getBucketACLOutputObs is the OBS-protocol response wrapper used to parse a
// GetBucketAcl reply.
type getBucketACLOutputObs struct {
	BaseModel
	accessControlPolicyObs
}
// SetBucketAclInput is the input parameter of SetBucketAcl function
type SetBucketAclInput struct {
Bucket string `xml:"-"`
ACL AclType `xml:"-"`
AccessControlPolicy
}
// SetBucketPolicyInput is the input parameter of SetBucketPolicy function
type SetBucketPolicyInput struct {
Bucket string
Policy string
}
// GetBucketPolicyOutput is the result of GetBucketPolicy function
type GetBucketPolicyOutput struct {
BaseModel
Policy string `json:"body"`
}
// SetBucketCorsInput is the input parameter of SetBucketCors function
type SetBucketCorsInput struct {
Bucket string `xml:"-"`
BucketCors
}
// GetBucketCorsOutput is the result of GetBucketCors function
type GetBucketCorsOutput struct {
BaseModel
BucketCors
}
// SetBucketVersioningInput is the input parameter of SetBucketVersioning function
type SetBucketVersioningInput struct {
Bucket string `xml:"-"`
BucketVersioningConfiguration
}
// GetBucketVersioningOutput is the result of GetBucketVersioning function
type GetBucketVersioningOutput struct {
BaseModel
BucketVersioningConfiguration
}
// SetBucketWebsiteConfigurationInput is the input parameter of SetBucketWebsiteConfiguration function
type SetBucketWebsiteConfigurationInput struct {
Bucket string `xml:"-"`
BucketWebsiteConfiguration
}
// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function
type GetBucketWebsiteConfigurationOutput struct {
BaseModel
BucketWebsiteConfiguration
}
// GetBucketMetadataInput is the input parameter of GetBucketMetadata function
type GetBucketMetadataInput struct {
Bucket string
Origin string
RequestHeader string
}
// SetObjectMetadataInput is the input parameter of SetObjectMetadata function
type SetObjectMetadataInput struct {
Bucket string
Key string
VersionId string
MetadataDirective MetadataDirectiveType
CacheControl string
ContentDisposition string
ContentEncoding string
ContentLanguage string
ContentType string
Expires string
WebsiteRedirectLocation string
StorageClass StorageClassType
Metadata map[string]string
}
// SetObjectMetadataOutput is the result of SetObjectMetadata function
type SetObjectMetadataOutput struct {
	BaseModel
	MetadataDirective MetadataDirectiveType
	CacheControl string
	ContentDisposition string
	ContentEncoding string
	ContentLanguage string
	ContentType string
	Expires string
	WebsiteRedirectLocation string
	StorageClass StorageClassType
	Metadata map[string]string
}
// GetBucketMetadataOutput is the result of GetBucketMetadata function
type GetBucketMetadataOutput struct {
BaseModel
StorageClass StorageClassType
Location string
Version string
AllowOrigin string
AllowMethod string
AllowHeader string
MaxAgeSeconds int
ExposeHeader string
Epid string
AZRedundancy string
FSStatus FSStatusType
}
// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function
type SetBucketLoggingConfigurationInput struct {
Bucket string `xml:"-"`
BucketLoggingStatus
}
// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function
type GetBucketLoggingConfigurationOutput struct {
BaseModel
BucketLoggingStatus
}
// BucketLifecyleConfiguration defines the bucket lifecycle configuration
type BucketLifecyleConfiguration struct {
XMLName xml.Name `xml:"LifecycleConfiguration"`
LifecycleRules []LifecycleRule `xml:"Rule"`
}
// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function
type SetBucketLifecycleConfigurationInput struct {
Bucket string `xml:"-"`
BucketLifecyleConfiguration
}
// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function
type GetBucketLifecycleConfigurationOutput struct {
BaseModel
BucketLifecyleConfiguration
}
// SetBucketEncryptionInput is the input parameter of SetBucketEncryption function
type SetBucketEncryptionInput struct {
Bucket string `xml:"-"`
BucketEncryptionConfiguration
}
// GetBucketEncryptionOutput is the result of GetBucketEncryption function
type GetBucketEncryptionOutput struct {
BaseModel
BucketEncryptionConfiguration
}
// SetBucketTaggingInput is the input parameter of SetBucketTagging function
type SetBucketTaggingInput struct {
Bucket string `xml:"-"`
BucketTagging
}
// GetBucketTaggingOutput is the result of GetBucketTagging function
type GetBucketTaggingOutput struct {
BaseModel
BucketTagging
}
// SetBucketNotificationInput is the input parameter of SetBucketNotification function
type SetBucketNotificationInput struct {
Bucket string `xml:"-"`
BucketNotification
}
// getBucketNotificationOutputS3 is the S3-protocol response wrapper used to
// parse a GetBucketNotification reply.
type getBucketNotificationOutputS3 struct {
	BaseModel
	bucketNotificationS3
}
// GetBucketNotificationOutput is the result of GetBucketNotification function
type GetBucketNotificationOutput struct {
BaseModel
BucketNotification
}
// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function
type SetBucketFetchPolicyInput struct {
Bucket string
Status FetchPolicyStatusType `json:"status"`
Agency string `json:"agency"`
}
// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function
type GetBucketFetchPolicyInput struct {
Bucket string
}
// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function
type GetBucketFetchPolicyOutput struct {
BaseModel
FetchResponse `json:"fetch"`
}
// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function
type DeleteBucketFetchPolicyInput struct {
Bucket string
}
// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function
type SetBucketFetchJobInput struct {
Bucket string `json:"bucket"`
URL string `json:"url"`
Host string `json:"host,omitempty"`
Key string `json:"key,omitempty"`
Md5 string `json:"md5,omitempty"`
CallBackURL string `json:"callbackurl,omitempty"`
CallBackBody string `json:"callbackbody,omitempty"`
CallBackBodyType string `json:"callbackbodytype,omitempty"`
CallBackHost string `json:"callbackhost,omitempty"`
FileType string `json:"file_type,omitempty"`
IgnoreSameKey bool `json:"ignore_same_key,omitempty"`
ObjectHeaders map[string]string `json:"objectheaders,omitempty"`
Etag string `json:"etag,omitempty"`
TrustName string `json:"trustname,omitempty"`
}
// SetBucketFetchJobOutput is the result of SetBucketFetchJob function
type SetBucketFetchJobOutput struct {
BaseModel
SetBucketFetchJobResponse
}
// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function
type GetBucketFetchJobInput struct {
Bucket string
JobID string
}
// GetBucketFetchJobOutput is the result of GetBucketFetchJob function
type GetBucketFetchJobOutput struct {
BaseModel
GetBucketFetchJobResponse
}
// GetBucketFSStatusInput is the input parameter of GetBucketFSStatus function
type GetBucketFSStatusInput struct {
	GetBucketMetadataInput
}

// GetBucketFSStatusOutput is the result of GetBucketFSStatus function
type GetBucketFSStatusOutput struct {
	GetBucketMetadataOutput
	FSStatus FSStatusType
}

@ -1,32 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
// ISseHeader defines the sse encryption header
type ISseHeader interface {
GetEncryption() string
GetKey() string
}
// SseKmsHeader defines the SseKms header
type SseKmsHeader struct {
Encryption string
Key string
isObs bool
}
// SseCHeader defines the SseC header
type SseCHeader struct {
Encryption string
Key string
KeyMD5 string
}

@ -1,388 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
"io"
"time"
)
// ListObjsInput defines parameters for listing objects
type ListObjsInput struct {
Prefix string
MaxKeys int
Delimiter string
Origin string
RequestHeader string
EncodingType string
}
// ListObjectsInput is the input parameter of ListObjects function
type ListObjectsInput struct {
ListObjsInput
Bucket string
Marker string
}
// ListObjectsOutput is the result of ListObjects function
type ListObjectsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListBucketResult"`
Delimiter string `xml:"Delimiter"`
IsTruncated bool `xml:"IsTruncated"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxKeys int `xml:"MaxKeys"`
Name string `xml:"Name"`
Prefix string `xml:"Prefix"`
Contents []Content `xml:"Contents"`
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
Location string `xml:"-"`
EncodingType string `xml:"EncodingType,omitempty"`
}
// ListVersionsInput is the input parameter of ListVersions function
type ListVersionsInput struct {
ListObjsInput
Bucket string
KeyMarker string
VersionIdMarker string
}
// ListVersionsOutput is the result of ListVersions function
type ListVersionsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListVersionsResult"`
Delimiter string `xml:"Delimiter"`
IsTruncated bool `xml:"IsTruncated"`
KeyMarker string `xml:"KeyMarker"`
NextKeyMarker string `xml:"NextKeyMarker"`
VersionIdMarker string `xml:"VersionIdMarker"`
NextVersionIdMarker string `xml:"NextVersionIdMarker"`
MaxKeys int `xml:"MaxKeys"`
Name string `xml:"Name"`
Prefix string `xml:"Prefix"`
Versions []Version `xml:"Version"`
DeleteMarkers []DeleteMarker `xml:"DeleteMarker"`
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
Location string `xml:"-"`
EncodingType string `xml:"EncodingType,omitempty"`
}
// DeleteObjectInput is the input parameter of DeleteObject function
type DeleteObjectInput struct {
 Bucket    string
 Key       string
 VersionId string
}
// DeleteObjectOutput is the result of DeleteObject function
type DeleteObjectOutput struct {
 BaseModel
 VersionId    string
 DeleteMarker bool
}
// DeleteObjectsInput is the input parameter of DeleteObjects function.
// It is serialized as the <Delete> XML request body; Bucket travels in the
// URL, not the body (xml:"-").
type DeleteObjectsInput struct {
 Bucket       string           `xml:"-"`
 XMLName      xml.Name         `xml:"Delete"`
 Quiet        bool             `xml:"Quiet,omitempty"`
 Objects      []ObjectToDelete `xml:"Object"`
 EncodingType string           `xml:"EncodingType"`
}
// DeleteObjectsOutput is the result of DeleteObjects function
type DeleteObjectsOutput struct {
 BaseModel
 XMLName      xml.Name  `xml:"DeleteResult"`
 Deleteds     []Deleted `xml:"Deleted"`
 Errors       []Error   `xml:"Error"`
 EncodingType string    `xml:"EncodingType,omitempty"`
}
// SetObjectAclInput is the input parameter of SetObjectAcl function.
// Only the embedded AccessControlPolicy is serialized to XML; the other
// fields are carried out-of-band (xml:"-").
type SetObjectAclInput struct {
 Bucket    string  `xml:"-"`
 Key       string  `xml:"-"`
 VersionId string  `xml:"-"`
 ACL       AclType `xml:"-"`
 AccessControlPolicy
}
// GetObjectAclInput is the input parameter of GetObjectAcl function
type GetObjectAclInput struct {
 Bucket    string
 Key       string
 VersionId string
}
// GetObjectAclOutput is the result of GetObjectAcl function
type GetObjectAclOutput struct {
 BaseModel
 VersionId string
 AccessControlPolicy
}
// RestoreObjectInput is the input parameter of RestoreObject function.
// Serialized as the <RestoreRequest> XML body; identifiers travel in the URL.
type RestoreObjectInput struct {
 Bucket    string          `xml:"-"`
 Key       string          `xml:"-"`
 VersionId string          `xml:"-"`
 XMLName   xml.Name        `xml:"RestoreRequest"`
 Days      int             `xml:"Days"`
 Tier      RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"`
}
// GetObjectMetadataInput is the input parameter of GetObjectMetadata function
type GetObjectMetadataInput struct {
 Bucket        string
 Key           string
 VersionId     string
 Origin        string
 RequestHeader string
 SseHeader     ISseHeader
}
// GetObjectMetadataOutput is the result of GetObjectMetadata function.
// Values are presumably extracted from response headers rather than a body —
// confirm against the client implementation.
type GetObjectMetadataOutput struct {
 BaseModel
 VersionId               string
 WebsiteRedirectLocation string
 Expiration              string
 Restore                 string
 ObjectType              string
 NextAppendPosition      string
 StorageClass            StorageClassType
 ContentLength           int64
 ContentType             string
 ETag                    string
 AllowOrigin             string
 AllowHeader             string
 AllowMethod             string
 ExposeHeader            string
 MaxAgeSeconds           int
 LastModified            time.Time
 SseHeader               ISseHeader
 Metadata                map[string]string
}
// GetAttributeInput is the input parameter of the GetAttribute function;
// it extends GetObjectMetadataInput with a requester-pays header value.
type GetAttributeInput struct {
 GetObjectMetadataInput
 RequestPayer string
}
// GetAttributeOutput is the result of the GetAttribute function;
// Mode carries the additional attribute returned beyond object metadata.
type GetAttributeOutput struct {
 GetObjectMetadataOutput
 Mode int
}
// GetObjectInput is the input parameter of GetObject function
type GetObjectInput struct {
 GetObjectMetadataInput
 IfMatch                    string
 IfNoneMatch                string
 IfUnmodifiedSince          time.Time
 IfModifiedSince            time.Time
 RangeStart                 int64
 RangeEnd                   int64
 ImageProcess               string
 ResponseCacheControl       string
 ResponseContentDisposition string
 ResponseContentEncoding    string
 ResponseContentLanguage    string
 ResponseContentType        string
 ResponseExpires            string
}
// GetObjectOutput is the result of GetObject function.
// Body is the object content stream; callers are responsible for closing it.
type GetObjectOutput struct {
 GetObjectMetadataOutput
 DeleteMarker       bool
 CacheControl       string
 ContentDisposition string
 ContentEncoding    string
 ContentLanguage    string
 Expires            string
 Body               io.ReadCloser
}
// ObjectOperationInput defines the object operation properties shared by
// write-style operations (put, copy, multipart initiate, …) that embed it.
type ObjectOperationInput struct {
 Bucket                  string
 Key                     string
 ACL                     AclType
 GrantReadId             string
 GrantReadAcpId          string
 GrantWriteAcpId         string
 GrantFullControlId      string
 StorageClass            StorageClassType
 WebsiteRedirectLocation string
 Expires                 int64
 SseHeader               ISseHeader
 Metadata                map[string]string
}
// PutObjectBasicInput defines the basic object operation properties
type PutObjectBasicInput struct {
 ObjectOperationInput
 ContentType     string
 ContentMD5      string
 ContentLength   int64
 ContentEncoding string
}
// PutObjectInput is the input parameter of PutObject function
type PutObjectInput struct {
 PutObjectBasicInput
 Body io.Reader
}
// NewFolderInput is the input parameter of the NewFolder function;
// RequestPayer carries the requester-pays header value.
type NewFolderInput struct {
 ObjectOperationInput
 RequestPayer string
}
// NewFolderOutput is the result of the NewFolder function; a folder is
// created as an object, so the output is identical to PutObjectOutput.
type NewFolderOutput struct {
 PutObjectOutput
}
// PutFileInput is the input parameter of PutFile function
type PutFileInput struct {
 PutObjectBasicInput
 SourceFile string
}
// PutObjectOutput is the result of PutObject function
type PutObjectOutput struct {
 BaseModel
 VersionId    string
 SseHeader    ISseHeader
 StorageClass StorageClassType
 ETag         string
 ObjectUrl    string
}
// CopyObjectInput is the input parameter of CopyObject function
type CopyObjectInput struct {
 ObjectOperationInput
 CopySourceBucket            string
 CopySourceKey               string
 CopySourceVersionId         string
 CopySourceIfMatch           string
 CopySourceIfNoneMatch       string
 CopySourceIfUnmodifiedSince time.Time
 CopySourceIfModifiedSince   time.Time
 SourceSseHeader             ISseHeader
 CacheControl                string
 ContentDisposition          string
 ContentEncoding             string
 ContentLanguage             string
 ContentType                 string
 Expires                     string
 MetadataDirective           MetadataDirectiveType
 SuccessActionRedirect       string
}
// CopyObjectOutput is the result of CopyObject function.
// Only LastModified/ETag come from the CopyObjectResult XML; the xml:"-"
// fields are filled from response headers.
type CopyObjectOutput struct {
 BaseModel
 CopySourceVersionId string     `xml:"-"`
 VersionId           string     `xml:"-"`
 SseHeader           ISseHeader `xml:"-"`
 XMLName             xml.Name   `xml:"CopyObjectResult"`
 LastModified        time.Time  `xml:"LastModified"`
 ETag                string     `xml:"ETag"`
}
// UploadFileInput is the input parameter of UploadFile function
type UploadFileInput struct {
 ObjectOperationInput
 ContentType      string
 UploadFile       string
 PartSize         int64
 TaskNum          int
 EnableCheckpoint bool
 CheckpointFile   string
 EncodingType     string
}
// DownloadFileInput is the input parameter of DownloadFile function
type DownloadFileInput struct {
 GetObjectMetadataInput
 IfMatch           string
 IfNoneMatch       string
 IfModifiedSince   time.Time
 IfUnmodifiedSince time.Time
 DownloadFile      string
 PartSize          int64
 TaskNum           int
 EnableCheckpoint  bool
 CheckpointFile    string
}
// AppendObjectInput is the input parameter of the AppendObject function;
// Position is the offset at which Body is appended.
type AppendObjectInput struct {
 PutObjectBasicInput
 Body     io.Reader
 Position int64
}
// AppendObjectOutput is the result of the AppendObject function;
// NextAppendPosition is the offset to use for the next append.
type AppendObjectOutput struct {
 BaseModel
 VersionId          string
 SseHeader          ISseHeader
 NextAppendPosition int64
 ETag               string
}
// ModifyObjectInput is the input parameter of the ModifyObject function;
// it overwrites ContentLength bytes of the object starting at Position.
type ModifyObjectInput struct {
 Bucket        string
 Key           string
 Position      int64
 Body          io.Reader
 ContentLength int64
}
// ModifyObjectOutput is the result of the ModifyObject function.
type ModifyObjectOutput struct {
 BaseModel
 ETag string
}
// HeadObjectInput is the input parameter of HeadObject function
type HeadObjectInput struct {
 Bucket    string
 Key       string
 VersionId string
}
// RenameFileInput is the input parameter of the RenameFile function;
// Key is the current object key and NewObjectKey the target key.
type RenameFileInput struct {
 Bucket       string
 Key          string
 NewObjectKey string
 RequestPayer string
}
// RenameFileOutput is the result of the RenameFile function.
type RenameFileOutput struct {
 BaseModel
}
// RenameFolderInput is the input parameter of the RenameFolder function;
// same shape as RenameFileInput but for folder objects.
type RenameFolderInput struct {
 Bucket       string
 Key          string
 NewObjectKey string
 RequestPayer string
}
// RenameFolderOutput is the result of the RenameFolder function.
type RenameFolderOutput struct {
 BaseModel
}

@ -1,63 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"net/http"
)
// CreateSignedUrlInput is the input parameter of CreateSignedUrl function.
// Expires is in seconds; Headers/QueryParams are folded into the signature.
type CreateSignedUrlInput struct {
 Method      HttpMethodType
 Bucket      string
 Key         string
 SubResource SubResourceType
 Expires     int
 Headers     map[string]string
 QueryParams map[string]string
}
// CreateSignedUrlOutput is the result of CreateSignedUrl function.
// ActualSignedRequestHeaders are the headers the caller must send verbatim
// for the signature to validate.
type CreateSignedUrlOutput struct {
 SignedUrl                  string
 ActualSignedRequestHeaders http.Header
}
// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function.
type CreateBrowserBasedSignatureInput struct {
 Bucket     string
 Key        string
 Expires    int
 FormParams map[string]string
}
// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function.
type CreateBrowserBasedSignatureOutput struct {
 OriginPolicy string
 Policy       string
 Algorithm    string
 Credential   string
 Date         string
 Signature    string
}
// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function
type SetBucketRequestPaymentInput struct {
 Bucket string `xml:"-"`
 BucketPayer
}
// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function
type GetBucketRequestPaymentOutput struct {
 BaseModel
 BucketPayer
}

@ -1,170 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
"io"
"time"
)
// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function
type ListMultipartUploadsInput struct {
 Bucket         string
 Prefix         string
 MaxUploads     int
 Delimiter      string
 KeyMarker      string
 UploadIdMarker string
 EncodingType   string
}
// ListMultipartUploadsOutput is the result of ListMultipartUploads function.
// Fields map 1:1 onto the ListMultipartUploadsResult XML response.
type ListMultipartUploadsOutput struct {
 BaseModel
 XMLName            xml.Name `xml:"ListMultipartUploadsResult"`
 Bucket             string   `xml:"Bucket"`
 KeyMarker          string   `xml:"KeyMarker"`
 NextKeyMarker      string   `xml:"NextKeyMarker"`
 UploadIdMarker     string   `xml:"UploadIdMarker"`
 NextUploadIdMarker string   `xml:"NextUploadIdMarker"`
 Delimiter          string   `xml:"Delimiter"`
 IsTruncated        bool     `xml:"IsTruncated"`
 MaxUploads         int      `xml:"MaxUploads"`
 Prefix             string   `xml:"Prefix"`
 Uploads            []Upload `xml:"Upload"`
 CommonPrefixes     []string `xml:"CommonPrefixes>Prefix"`
 EncodingType       string   `xml:"EncodingType,omitempty"`
}
// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function
type AbortMultipartUploadInput struct {
 Bucket   string
 Key      string
 UploadId string
}
// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function
type InitiateMultipartUploadInput struct {
 ObjectOperationInput
 ContentType  string
 EncodingType string
}
// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function
type InitiateMultipartUploadOutput struct {
 BaseModel
 XMLName      xml.Name `xml:"InitiateMultipartUploadResult"`
 Bucket       string   `xml:"Bucket"`
 Key          string   `xml:"Key"`
 UploadId     string   `xml:"UploadId"`
 // SseHeader has no xml tag; it is presumably populated from response
 // headers — confirm against the client code.
 SseHeader    ISseHeader
 EncodingType string   `xml:"EncodingType,omitempty"`
}
// UploadPartInput is the input parameter of UploadPart function.
// Either Body or SourceFile (with Offset/PartSize) supplies the part data.
type UploadPartInput struct {
 Bucket     string
 Key        string
 PartNumber int
 UploadId   string
 ContentMD5 string
 SseHeader  ISseHeader
 Body       io.Reader
 SourceFile string
 Offset     int64
 PartSize   int64
}
// UploadPartOutput is the result of UploadPart function
type UploadPartOutput struct {
 BaseModel
 PartNumber int
 ETag       string
 SseHeader  ISseHeader
}
// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function.
// Serialized as the <CompleteMultipartUpload> XML body; identifiers travel
// in the URL (xml:"-").
type CompleteMultipartUploadInput struct {
 Bucket       string   `xml:"-"`
 Key          string   `xml:"-"`
 UploadId     string   `xml:"-"`
 XMLName      xml.Name `xml:"CompleteMultipartUpload"`
 Parts        []Part   `xml:"Part"`
 EncodingType string   `xml:"-"`
}
// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function
type CompleteMultipartUploadOutput struct {
 BaseModel
 VersionId    string     `xml:"-"`
 SseHeader    ISseHeader `xml:"-"`
 XMLName      xml.Name   `xml:"CompleteMultipartUploadResult"`
 Location     string     `xml:"Location"`
 Bucket       string     `xml:"Bucket"`
 Key          string     `xml:"Key"`
 ETag         string     `xml:"ETag"`
 EncodingType string     `xml:"EncodingType,omitempty"`
}
// ListPartsInput is the input parameter of ListParts function
type ListPartsInput struct {
 Bucket           string
 Key              string
 UploadId         string
 MaxParts         int
 PartNumberMarker int
 EncodingType     string
}
// ListPartsOutput is the result of ListParts function.
// Fields map 1:1 onto the ListPartsResult XML response.
type ListPartsOutput struct {
 BaseModel
 XMLName              xml.Name         `xml:"ListPartsResult"`
 Bucket               string           `xml:"Bucket"`
 Key                  string           `xml:"Key"`
 UploadId             string           `xml:"UploadId"`
 PartNumberMarker     int              `xml:"PartNumberMarker"`
 NextPartNumberMarker int              `xml:"NextPartNumberMarker"`
 MaxParts             int              `xml:"MaxParts"`
 IsTruncated          bool             `xml:"IsTruncated"`
 StorageClass         StorageClassType `xml:"StorageClass"`
 Initiator            Initiator        `xml:"Initiator"`
 Owner                Owner            `xml:"Owner"`
 Parts                []Part           `xml:"Part"`
 EncodingType         string           `xml:"EncodingType,omitempty"`
}
// CopyPartInput is the input parameter of CopyPart function.
// CopySourceRangeStart/End select the byte range copied from the source.
type CopyPartInput struct {
 Bucket               string
 Key                  string
 UploadId             string
 PartNumber           int
 CopySourceBucket     string
 CopySourceKey        string
 CopySourceVersionId  string
 CopySourceRangeStart int64
 CopySourceRangeEnd   int64
 SseHeader            ISseHeader
 SourceSseHeader      ISseHeader
}
// CopyPartOutput is the result of CopyPart function.
// Only ETag/LastModified come from the CopyPartResult XML body.
type CopyPartOutput struct {
 BaseModel
 XMLName      xml.Name   `xml:"CopyPartResult"`
 PartNumber   int        `xml:"-"`
 ETag         string     `xml:"ETag"`
 LastModified time.Time  `xml:"LastModified"`
 SseHeader    ISseHeader `xml:"-"`
}

@ -1,67 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/xml"
)
// BaseModel defines base model response from OBS; it is embedded by every
// output struct to carry the HTTP status, request id, and raw headers.
type BaseModel struct {
 StatusCode      int                 `xml:"-"`
 RequestId       string              `xml:"RequestId" json:"request_id"`
 ResponseHeaders map[string][]string `xml:"-"`
}
// Error defines the error property in DeleteObjectsOutput
type Error struct {
 XMLName   xml.Name `xml:"Error"`
 Key       string   `xml:"Key"`
 VersionId string   `xml:"VersionId"`
 Code      string   `xml:"Code"`
 Message   string   `xml:"Message"`
}
// FetchResponse defines the response fetch policy configuration
type FetchResponse struct {
 Status FetchPolicyStatusType `json:"status"`
 Agency string                `json:"agency"`
}
// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration
type SetBucketFetchJobResponse struct {
 ID   string `json:"id"`
 // NOTE(review): json tag "Wait" is capitalized unlike the other tags —
 // confirm this matches the service payload before changing it.
 Wait int    `json:"Wait"`
}
// GetBucketFetchJobResponse defines the response fetch job configuration
type GetBucketFetchJobResponse struct {
 Err    string      `json:"err"`
 Code   string      `json:"code"`
 Status string      `json:"status"`
 Job    JobResponse `json:"job"`
}
// JobResponse defines the response job configuration
type JobResponse struct {
 Bucket           string `json:"bucket"`
 URL              string `json:"url"`
 Host             string `json:"host"`
 Key              string `json:"key"`
 Md5              string `json:"md5"`
 CallBackURL      string `json:"callbackurl"`
 CallBackBody     string `json:"callbackbody"`
 CallBackBodyType string `json:"callbackbodytype"`
 CallBackHost     string `json:"callbackhost"`
 FileType         string `json:"file_type"`
 IgnoreSameKey    bool   `json:"ignore_same_key"`
}

@ -1,542 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//nolint:structcheck, unused
package obs
import (
"errors"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
)
// Future defines interface with function: Get
type Future interface {
 Get() interface{}
}
// FutureResult for task result. The result is delivered once through
// resultChan (buffered, capacity 1) and cached in result; lock serializes
// the first consumption in Get.
type FutureResult struct {
 result     interface{}
 resultChan chan interface{}
 lock       sync.Mutex
}
// panicResult wraps a value recovered from a panicking task so Get can
// re-panic with it on the caller's goroutine (see runTask/checkPanic).
type panicResult struct {
 presult interface{}
}
// checkPanic re-raises a panic captured by the worker goroutine, so the
// caller of Get observes it; otherwise it returns the stored result.
func (f *FutureResult) checkPanic() interface{} {
 captured, wasPanic := f.result.(panicResult)
 if wasPanic {
  panic(captured.presult)
 }
 return f.result
}
// Get gets the task result, blocking until the task completes. The first
// caller receives from resultChan, caches the value, and tears the channel
// down; later callers take the cached fast path.
func (f *FutureResult) Get() interface{} {
 // Fast path: result already consumed and cached.
 // NOTE(review): this read of f.resultChan is not synchronized with the
 // nil-store below (double-checked locking without atomics) — technically a
 // data race under the Go memory model; confirm with -race before relying on it.
 if f.resultChan == nil {
  return f.checkPanic()
 }
 f.lock.Lock()
 defer f.lock.Unlock()
 // Re-check under the lock: another caller may have consumed the channel
 // while we were waiting.
 if f.resultChan == nil {
  return f.checkPanic()
 }
 f.result = <-f.resultChan
 close(f.resultChan)
 f.resultChan = nil
 return f.checkPanic()
}
// Task defines interface with function: Run
type Task interface {
 Run() interface{}
}
// funcWrapper adapts a plain function to the Task interface.
type funcWrapper struct {
 f func() interface{}
}
// Run invokes the wrapped function; a nil function yields nil.
func (fw *funcWrapper) Run() interface{} {
 if fw.f == nil {
  return nil
 }
 return fw.f()
}
// taskWrapper pairs a submitted Task with the FutureResult its return value
// (or recovered panic) is delivered to.
type taskWrapper struct {
 t Task
 f *FutureResult
}
// Run executes the wrapped task; a nil task yields nil.
func (tw *taskWrapper) Run() interface{} {
 if tw.t == nil {
  return nil
 }
 return tw.t.Run()
}
// signalTask is a no-op Task used as an in-band control message on the
// dispatch queue (see closeQueue).
type signalTask struct {
 id string
}
// Run does nothing; signalTask exists only to be compared against sentinels.
func (signalTask) Run() interface{} {
 return nil
}
// worker is a single pool goroutine: it drains taskQueue until the queue
// closes or autoTuneWorker retires it.
type worker struct {
 name      string
 taskQueue chan Task
 wg        *sync.WaitGroup
 pool      *RoutinePool
}
// runTask executes a task. For a taskWrapper it delivers the return value to
// the associated FutureResult, converting a panic into a panicResult so the
// waiting Get can re-raise it; any other Task is run fire-and-forget.
func runTask(t Task) {
 tw, hasFuture := t.(*taskWrapper)
 if !hasFuture {
  t.Run()
  return
 }
 defer func() {
  if r := recover(); r != nil {
   tw.f.resultChan <- panicResult{presult: r}
  }
 }()
 tw.f.resultChan <- tw.Run()
}
// runTask delegates to the package-level runTask; kept as a method so the
// worker loop reads naturally.
func (*worker) runTask(t Task) {
 runTask(t)
}
// start launches the worker goroutine. The loop exits when taskQueue is
// closed (pool shutdown) or when autoTuneWorker decides this worker is
// surplus; in both cases the deferred wg.Done releases the pool's wait.
func (w *worker) start() {
 go func() {
  defer func() {
   if w.wg != nil {
    w.wg.Done()
   }
  }()
  for {
   task, ok := <-w.taskQueue
   if !ok {
    // Queue closed: pool is shutting down.
    break
   }
   // Bracket the task with the working counter used for introspection.
   w.pool.AddCurrentWorkingCnt(1)
   w.runTask(task)
   w.pool.AddCurrentWorkingCnt(-1)
   // Retire this worker if the pool has shrunk below the current count.
   if w.pool.autoTuneWorker(w) {
    break
   }
  }
 }()
}
// release drops the worker's references so the pool and queues can be
// collected. NOTE(review): callers must ensure the worker goroutine has
// stopped first — the fields are read without synchronization in start.
func (w *worker) release() {
 w.taskQueue = nil
 w.wg = nil
 w.pool = nil
}
// Pool defines coroutine pool interface
type Pool interface {
 ShutDown()
 Submit(t Task) (Future, error)
 SubmitFunc(f func() interface{}) (Future, error)
 Execute(t Task)
 ExecuteFunc(f func() interface{})
 GetMaxWorkerCnt() int64
 AddMaxWorkerCnt(value int64) int64
 GetCurrentWorkingCnt() int64
 AddCurrentWorkingCnt(value int64) int64
 GetWorkerCnt() int64
 AddWorkerCnt(value int64) int64
 EnableAutoTune()
}
// basicPool holds the shared atomic counters embedded by both pool
// implementations. All int64 fields are accessed via sync/atomic only.
type basicPool struct {
 maxWorkerCnt      int64
 workerCnt         int64
 currentWorkingCnt int64
 isShutDown        int32
}
// ErrTaskInvalid will be returned if the task is nil
var ErrTaskInvalid = errors.New("Task is nil")
// GetCurrentWorkingCnt atomically reads the number of tasks in flight.
func (pool *basicPool) GetCurrentWorkingCnt() int64 {
 return atomic.LoadInt64(&pool.currentWorkingCnt)
}
// AddCurrentWorkingCnt atomically adjusts the in-flight counter and returns
// the new value.
func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
 return atomic.AddInt64(&pool.currentWorkingCnt, value)
}
// GetWorkerCnt atomically reads the number of live workers.
func (pool *basicPool) GetWorkerCnt() int64 {
 return atomic.LoadInt64(&pool.workerCnt)
}
// AddWorkerCnt atomically adjusts the worker counter and returns the new value.
func (pool *basicPool) AddWorkerCnt(value int64) int64 {
 return atomic.AddInt64(&pool.workerCnt, value)
}
// GetMaxWorkerCnt atomically reads the worker ceiling.
func (pool *basicPool) GetMaxWorkerCnt() int64 {
 return atomic.LoadInt64(&pool.maxWorkerCnt)
}
// AddMaxWorkerCnt atomically adjusts the worker ceiling and returns the new
// value (RoutinePool gates this behind its autoTune flag).
func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
 return atomic.AddInt64(&pool.maxWorkerCnt, value)
}
// CompareAndSwapCurrentWorkingCnt CASes the in-flight counter.
func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
 return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
}
// EnableAutoTune is a no-op on the basic pool; RoutinePool overrides it.
func (pool *basicPool) EnableAutoTune() {
}
// RoutinePool defines the coroutine pool struct. Tasks flow
// dispatchQueue -> dispatcher goroutine -> taskQueue -> workers; the
// dispatcher also grows the worker set on demand.
type RoutinePool struct {
 basicPool
 taskQueue     chan Task
 dispatchQueue chan Task
 workers       map[string]*worker
 cacheCnt      int
 wg            *sync.WaitGroup
 lock          *sync.Mutex
 shutDownWg    *sync.WaitGroup
 autoTune      int32
}
// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
var ErrSubmitTimeout = errors.New("Submit task timeout")
// ErrPoolShutDown will be returned if RoutinePool is shutdown
var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
// ErrTaskReject will be returned if submit task is rejected
var ErrTaskReject = errors.New("Submit task is rejected")
// closeQueue is the sentinel signalTask the dispatcher interprets as
// "close taskQueue" during shutdown.
var closeQueue = signalTask{id: "closeQueue"}
// NewRoutinePool creates a RoutinePool instance. maxWorkerCnt defaults to
// the CPU count when non-positive; cacheCnt sizes the task queue buffer
// (zero or negative means unbuffered).
func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
 if maxWorkerCnt <= 0 {
  maxWorkerCnt = runtime.NumCPU()
 }
 p := &RoutinePool{
  cacheCnt:   cacheCnt,
  wg:         new(sync.WaitGroup),
  lock:       new(sync.Mutex),
  shutDownWg: new(sync.WaitGroup),
  autoTune:   0,
 }
 p.isShutDown = 0
 p.maxWorkerCnt += int64(maxWorkerCnt)
 if p.cacheCnt > 0 {
  p.taskQueue = make(chan Task, p.cacheCnt)
 } else {
  p.taskQueue = make(chan Task)
 }
 p.workers = make(map[string]*worker, p.maxWorkerCnt)
 // dispatchQueue must not have length
 p.dispatchQueue = make(chan Task)
 p.dispatcher()
 return p
}
// EnableAutoTune sets the autoTune enabled; once set, AddMaxWorkerCnt takes
// effect and surplus workers retire themselves (see autoTuneWorker).
func (pool *RoutinePool) EnableAutoTune() {
 atomic.StoreInt32(&pool.autoTune, 1)
}
// checkStatus validates a submission: the task must be non-nil and the pool
// must not be shut down.
func (pool *RoutinePool) checkStatus(t Task) error {
 switch {
 case t == nil:
  return ErrTaskInvalid
 case atomic.LoadInt32(&pool.isShutDown) == 1:
  return ErrPoolShutDown
 default:
  return nil
 }
}
// dispatcher starts the goroutine that moves tasks from dispatchQueue to
// taskQueue, spawning workers up to the ceiling on demand. The closeQueue
// sentinel makes it close taskQueue (stopping the workers) while the
// dispatcher itself keeps draining until dispatchQueue is closed.
func (pool *RoutinePool) dispatcher() {
 pool.shutDownWg.Add(1)
 go func() {
  for {
   task, ok := <-pool.dispatchQueue
   if !ok {
    // dispatchQueue closed by doCloseDispatchQueue: we are done.
    break
   }
   if task == closeQueue {
    close(pool.taskQueue)
    pool.shutDownWg.Done()
    continue
   }
   // Lazily grow the worker set up to the configured ceiling.
   if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
    pool.addWorker()
   }
   pool.taskQueue <- task
  }
 }()
}
// AddMaxWorkerCnt adjusts the worker ceiling and returns the resulting
// value. When auto-tuning is disabled the ceiling is left untouched and the
// current value is returned instead.
func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
 if atomic.LoadInt32(&pool.autoTune) != 1 {
  return pool.GetMaxWorkerCnt()
 }
 return pool.basicPool.AddMaxWorkerCnt(value)
}
// addWorker creates, registers, and starts one worker goroutine. Called from
// the dispatcher; the lock is only needed when auto-tuning, because
// autoTuneWorker may concurrently delete entries from pool.workers.
func (pool *RoutinePool) addWorker() {
 if atomic.LoadInt32(&pool.autoTune) == 1 {
  pool.lock.Lock()
  defer pool.lock.Unlock()
 }
 w := &worker{}
 // Fix: the generated name previously read "woker-%d" (typo). The name is
 // only used as the pool.workers map key and is both written and deleted
 // via w.name, so the rename is self-consistent.
 w.name = fmt.Sprintf("worker-%d", len(pool.workers))
 w.taskQueue = pool.taskQueue
 w.wg = pool.wg
 pool.AddWorkerCnt(1)
 w.pool = pool
 pool.workers[w.name] = w
 pool.wg.Add(1)
 w.start()
}
// autoTuneWorker decides, after a task completes, whether worker w should
// retire because the worker count exceeds the (possibly lowered) ceiling.
// Returns true when w has been removed and must stop its loop.
func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
 if atomic.LoadInt32(&pool.autoTune) == 0 {
  return false
 }
 if w == nil {
  return false
 }
 workerCnt := pool.GetWorkerCnt()
 maxWorkerCnt := pool.GetMaxWorkerCnt()
 // The CAS guarantees only one goroutine claims each decrement even when
 // several workers race past the workerCnt > maxWorkerCnt check.
 if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
  pool.lock.Lock()
  defer pool.lock.Unlock()
  delete(pool.workers, w.name)
  w.wg.Done()
  w.release()
  return true
 }
 return false
}
// ExecuteFunc wraps the function as a Task and hands it to Execute
// (fire-and-forget, no Future).
func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
 pool.Execute(&funcWrapper{f: f})
}
// Execute pushes the task onto the dispatch queue; nil tasks are ignored.
func (pool *RoutinePool) Execute(t Task) {
 if t == nil {
  return
 }
 pool.dispatchQueue <- t
}
// SubmitFunc wraps the function as a Task and submits it, returning the
// Future for its result.
func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
 return pool.Submit(&funcWrapper{f: f})
}
// Submit validates the task, queues it for dispatch, and returns a Future
// that resolves to the task's return value (or re-raises its panic).
func (pool *RoutinePool) Submit(t Task) (Future, error) {
 if err := pool.checkStatus(t); err != nil {
  return nil, err
 }
 future := &FutureResult{resultChan: make(chan interface{}, 1)}
 pool.dispatchQueue <- &taskWrapper{t: t, f: future}
 return future, nil
}
// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
// Also takes a timeout value in milliseconds; returns ErrSubmitTimeout if the
// task cannot be enqueued within that time. A non-positive timeout falls back
// to the blocking Submit.
//
// Fix: the original spawned a dedicated timer goroutine with a redundant
// double Duration conversion (time.Duration(time.Millisecond * time.Duration(timeout)))
// and a select branch whose if/else returned the same error either way.
// time.After expresses the same semantics directly.
func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
 if timeout <= 0 {
  return pool.Submit(t)
 }
 if err := pool.checkStatus(t); err != nil {
  return nil, err
 }
 f := &FutureResult{}
 f.resultChan = make(chan interface{}, 1)
 tw := &taskWrapper{
  t: t,
  f: f,
 }
 select {
 case pool.dispatchQueue <- tw:
  return f, nil
 case <-time.After(time.Duration(timeout) * time.Millisecond):
  return nil, ErrSubmitTimeout
 }
}
// beforeCloseDispatchQueue flips the shutdown flag exactly once, asks the
// dispatcher to close taskQueue via the closeQueue sentinel, and waits for
// all workers to drain and exit.
func (pool *RoutinePool) beforeCloseDispatchQueue() {
 if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
  // Another goroutine already initiated shutdown.
  return
 }
 pool.dispatchQueue <- closeQueue
 pool.wg.Wait()
}
// doCloseDispatchQueue closes the dispatch queue and waits for the
// dispatcher goroutine itself to finish.
func (pool *RoutinePool) doCloseDispatchQueue() {
 close(pool.dispatchQueue)
 pool.shutDownWg.Wait()
}
// ShutDown closes the RoutinePool instance: stops workers, stops the
// dispatcher, then drops all references.
// NOTE(review): when called concurrently, only the first caller performs the
// sentinel/wait steps but every caller reaches the teardown below — confirm
// callers invoke ShutDown once.
func (pool *RoutinePool) ShutDown() {
 pool.beforeCloseDispatchQueue()
 pool.doCloseDispatchQueue()
 for _, w := range pool.workers {
  w.release()
 }
 pool.workers = nil
 pool.taskQueue = nil
 pool.dispatchQueue = nil
}
// NoChanPool defines the coroutine pool struct. Unlike RoutinePool it keeps
// no worker goroutines: each task gets its own goroutine, bounded by a
// token-bucket semaphore (tokens).
type NoChanPool struct {
 basicPool
 wg     *sync.WaitGroup
 tokens chan interface{}
}
// NewNochanPool creates a NoChanPool whose concurrency is capped at
// maxWorkerCnt (defaulting to the CPU count when non-positive). The token
// channel is pre-filled so that exactly maxWorkerCnt tasks may run at once.
func NewNochanPool(maxWorkerCnt int) Pool {
 if maxWorkerCnt <= 0 {
  maxWorkerCnt = runtime.NumCPU()
 }
 p := &NoChanPool{
  wg:     new(sync.WaitGroup),
  tokens: make(chan interface{}, maxWorkerCnt),
 }
 p.isShutDown = 0
 p.AddMaxWorkerCnt(int64(maxWorkerCnt))
 for range make([]struct{}, maxWorkerCnt) {
  p.tokens <- struct{}{}
 }
 return p
}
// acquire takes one concurrency token, blocking until a slot is free.
func (pool *NoChanPool) acquire() {
 <-pool.tokens
}
// release returns one concurrency token, freeing a slot for the next task.
// Fix: previously pushed the int 1 while NewNochanPool seeds the channel
// with struct{}{} — the values were never inspected, but mixing token types
// was inconsistent; use the same zero-size token everywhere.
func (pool *NoChanPool) release() {
 pool.tokens <- struct{}{}
}
// execute runs the task on its own goroutine once a concurrency token is
// available; the token and the WaitGroup slot are released even if the task
// panics inside runTask's recover-free fallback path.
func (pool *NoChanPool) execute(t Task) {
 pool.wg.Add(1)
 go func() {
  defer pool.wg.Done()
  pool.acquire()
  defer pool.release()
  runTask(t)
 }()
}
// ShutDown closes the NoChanPool instance: flips the shutdown flag once and
// waits for all in-flight task goroutines to finish.
func (pool *NoChanPool) ShutDown() {
 if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
  // Shutdown already initiated by another goroutine.
  return
 }
 pool.wg.Wait()
}
// Execute runs the task fire-and-forget; nil tasks are ignored.
func (pool *NoChanPool) Execute(t Task) {
 if t == nil {
  return
 }
 pool.execute(t)
}
// ExecuteFunc wraps the function as a Task and hands it to Execute.
func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
 pool.Execute(&funcWrapper{f: f})
}
// Submit runs the task and returns a Future for its result; a nil task
// yields ErrTaskInvalid.
func (pool *NoChanPool) Submit(t Task) (Future, error) {
 if t == nil {
  return nil, ErrTaskInvalid
 }
 future := &FutureResult{resultChan: make(chan interface{}, 1)}
 pool.execute(&taskWrapper{t: t, f: future})
 return future, nil
}
// SubmitFunc wraps the function as a Task and submits it.
func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
 return pool.Submit(&funcWrapper{f: f})
}

@ -1,243 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"encoding/json"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"strings"
"sync"
"sync/atomic"
"time"
)
const (
 accessKeyEnv     = "OBS_ACCESS_KEY_ID"
 securityKeyEnv   = "OBS_SECRET_ACCESS_KEY"
 securityTokenEnv = "OBS_SECURITY_TOKEN"
 // ecsRequestURL is the ECS metadata endpoint used to fetch temporary
 // credentials (link-local address, reachable only from an ECS instance).
 ecsRequestURL    = "http://169.254.169.254/openstack/latest/securitykey"
)
// securityHolder bundles one immutable credential set (AK/SK and optional
// security token).
type securityHolder struct {
 ak            string
 sk            string
 securityToken string
}
var emptySecurityHolder = securityHolder{}
// securityProvider is the strategy interface: each implementation below
// resolves credentials from a different source (static, env, ECS metadata).
type securityProvider interface {
 getSecurity() securityHolder
}
// BasicSecurityProvider serves a fixed credential set; val holds a
// securityHolder and is swapped atomically by refresh.
type BasicSecurityProvider struct {
 val atomic.Value
}
// getSecurity returns the most recently refreshed credentials, or the empty
// holder if refresh has never stored one.
func (bsp *BasicSecurityProvider) getSecurity() securityHolder {
 holder, ok := bsp.val.Load().(securityHolder)
 if !ok {
  return emptySecurityHolder
 }
 return holder
}
// refresh atomically replaces the stored credentials, trimming surrounding
// whitespace from each component.
func (bsp *BasicSecurityProvider) refresh(ak, sk, securityToken string) {
 bsp.val.Store(securityHolder{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)})
}
// NewBasicSecurityProvider builds a provider pre-loaded with the given
// static credentials.
func NewBasicSecurityProvider(ak, sk, securityToken string) *BasicSecurityProvider {
 provider := &BasicSecurityProvider{}
 provider.refresh(ak, sk, securityToken)
 return provider
}
// EnvSecurityProvider reads credentials from environment variables exactly
// once (guarded by once); suffix is appended to each variable name.
type EnvSecurityProvider struct {
 sh     securityHolder
 suffix string
 once   sync.Once
}
// getSecurity lazily reads the (suffixed) OBS credential environment
// variables; the lookup runs exactly once and the result is cached.
func (esp *EnvSecurityProvider) getSecurity() securityHolder {
 esp.once.Do(func() {
  fromEnv := func(name string) string {
   return strings.TrimSpace(os.Getenv(name + esp.suffix))
  }
  esp.sh = securityHolder{
   ak:            fromEnv(accessKeyEnv),
   sk:            fromEnv(securityKeyEnv),
   securityToken: fromEnv(securityTokenEnv),
  }
 })
 return esp.sh
}
// NewEnvSecurityProvider builds an environment-variable provider. A
// non-empty suffix is prefixed with "_" and appended to each variable name.
func NewEnvSecurityProvider(suffix string) *EnvSecurityProvider {
 if suffix != "" {
  suffix = "_" + suffix
 }
 return &EnvSecurityProvider{suffix: suffix}
}
// TemporarySecurityHolder pairs a credential set with its expiry time.
type TemporarySecurityHolder struct {
 securityHolder
 expireDate time.Time
}
var emptyTemporarySecurityHolder = TemporarySecurityHolder{}
// EcsSecurityProvider fetches temporary credentials from the ECS metadata
// service, caches them in val, and refreshes them before expiry; prefetch
// is a CAS flag ensuring at most one background refresh at a time.
type EcsSecurityProvider struct {
 val        atomic.Value
 lock       sync.Mutex
 httpClient *http.Client
 prefetch   int32
 retryCount int
}
// loadTemporarySecurityHolder returns the cached credential snapshot and
// whether one has been stored yet.
func (ecsSp *EcsSecurityProvider) loadTemporarySecurityHolder() (TemporarySecurityHolder, bool) {
 holder, ok := ecsSp.val.Load().(TemporarySecurityHolder)
 if !ok {
  return emptyTemporarySecurityHolder, false
 }
 return holder, true
}
// getAndSetSecurityWithOutLock fetches temporary credentials from the ECS
// metadata endpoint, retrying with a capped randomized backoff up to
// ecsSp.retryCount times, then stores whatever it obtained (possibly the
// empty holder with a 5-minute fallback expiry) in the cache.
// Callers must hold ecsSp.lock or otherwise guarantee exclusivity.
//
// Fix: the HTTP response body was never closed, leaking the underlying
// connection/file descriptor on every attempt; it is now closed as soon as
// it has been read.
func (ecsSp *EcsSecurityProvider) getAndSetSecurityWithOutLock() securityHolder {
 _sh := TemporarySecurityHolder{}
 // Fallback expiry if the fetch fails: retry again in five minutes.
 _sh.expireDate = time.Now().Add(time.Minute * 5)
 retryCount := 0
 for {
  if req, err := http.NewRequest("GET", ecsRequestURL, nil); err == nil {
   start := GetCurrentTimestamp()
   res, err := ecsSp.httpClient.Do(req)
   if err == nil {
    data, _err := ioutil.ReadAll(res.Body)
    // Close the body before branching so the connection is released
    // even when we retry (previously leaked).
    _ = res.Body.Close()
    if _err == nil {
     temp := &struct {
      Credential struct {
       AK            string    `json:"access,omitempty"`
       SK            string    `json:"secret,omitempty"`
       SecurityToken string    `json:"securitytoken,omitempty"`
       ExpireDate    time.Time `json:"expires_at,omitempty"`
      } `json:"credential"`
     }{}
     doLog(LEVEL_DEBUG, "Get the json data from ecs succeed")
     if jsonErr := json.Unmarshal(data, temp); jsonErr == nil {
      _sh.ak = temp.Credential.AK
      _sh.sk = temp.Credential.SK
      _sh.securityToken = temp.Credential.SecurityToken
      // Expire one minute early so callers refresh before the
      // service-side expiry.
      _sh.expireDate = temp.Credential.ExpireDate.Add(time.Minute * -1)
      doLog(LEVEL_INFO, "Get security from ecs succeed, AK:xxxx, SK:xxxx, SecurityToken:xxxx, ExprireDate %s", _sh.expireDate)
      doLog(LEVEL_INFO, "Get security from ecs succeed, cost %d ms", (GetCurrentTimestamp() - start))
      break
     } else {
      err = jsonErr
     }
    } else {
     err = _err
    }
   }
   doLog(LEVEL_WARN, "Try to get security from ecs failed, cost %d ms, err %s", (GetCurrentTimestamp() - start), err.Error())
  }
  if retryCount >= ecsSp.retryCount {
   doLog(LEVEL_WARN, "Try to get security from ecs failed and exceed the max retry count")
   break
  }
  // Randomized backoff, capped at 10 seconds.
  sleepTime := float64(retryCount+2) * rand.Float64()
  if sleepTime > 10 {
   sleepTime = 10
  }
  time.Sleep(time.Duration(sleepTime * float64(time.Second)))
  retryCount++
 }
 ecsSp.val.Store(_sh)
 return _sh.securityHolder
}
// getAndSetSecurity returns the cached credentials when still valid,
// otherwise refreshes them from the metadata service under the lock.
func (ecsSp *EcsSecurityProvider) getAndSetSecurity() securityHolder {
 ecsSp.lock.Lock()
 defer ecsSp.lock.Unlock()
 tsh, ok := ecsSp.loadTemporarySecurityHolder()
 if ok && !time.Now().After(tsh.expireDate) {
  return tsh.securityHolder
 }
 return ecsSp.getAndSetSecurityWithOutLock()
}
// getSecurity returns cached credentials while they are valid and refreshes
// them otherwise. Within five minutes of expiry it additionally performs a
// synchronous prefetch, serialized by the CAS on ecsSp.prefetch so that at
// most one caller pays the refresh cost.
func (ecsSp *EcsSecurityProvider) getSecurity() securityHolder {
 if tsh, succeed := ecsSp.loadTemporarySecurityHolder(); succeed {
  if time.Now().Before(tsh.expireDate) {
   //not expire
   if time.Now().Add(time.Minute*5).After(tsh.expireDate) && atomic.CompareAndSwapInt32(&ecsSp.prefetch, 0, 1) {
    //do prefetch
    // NOTE(review): this refresh bypasses ecsSp.lock (unlike
    // getAndSetSecurity); the CAS flag prevents concurrent prefetches
    // but not a race with a locked refresh — confirm intended.
    sh := ecsSp.getAndSetSecurityWithOutLock()
    atomic.CompareAndSwapInt32(&ecsSp.prefetch, 1, 0)
    return sh
   }
   return tsh.securityHolder
  }
  return ecsSp.getAndSetSecurity()
 }
 return ecsSp.getAndSetSecurity()
}
// getInternalTransport builds the HTTP transport used for metadata-service
// requests: 10s dial/response timeouts, small idle pool, compression off.
func getInternalTransport() *http.Transport {
 timeout := 10 // seconds
 transport := &http.Transport{
  Dial: func(network, addr string) (net.Conn, error) {
   start := GetCurrentTimestamp()
   conn, err := (&net.Dialer{
    Timeout:  time.Second * time.Duration(timeout),
    Resolver: net.DefaultResolver,
   }).Dial(network, addr)
   if isInfoLogEnabled() {
    doLog(LEVEL_INFO, "Do http dial cost %d ms", (GetCurrentTimestamp() - start))
   }
   if err != nil {
    return nil, err
   }
   // Wrap the connection with read/write deadlines (read: timeout,
   // write: 10x timeout — see getConnDelegate).
   return getConnDelegate(conn, timeout, timeout*10), nil
  },
  MaxIdleConns:          10,
  MaxIdleConnsPerHost:   10,
  ResponseHeaderTimeout: time.Second * time.Duration(timeout),
  IdleConnTimeout:       time.Second * time.Duration(DEFAULT_IDLE_CONN_TIMEOUT),
  DisableCompression:    true,
 }
 return transport
}
// NewEcsSecurityProvider creates an EcsSecurityProvider that fetches temporary
// credentials from the ECS metadata service, retrying up to retryCount times.
func NewEcsSecurityProvider(retryCount int) *EcsSecurityProvider {
	provider := &EcsSecurityProvider{retryCount: retryCount}
	provider.httpClient = &http.Client{
		Transport:     getInternalTransport(),
		CheckRedirect: checkRedirectFunc,
	}
	return provider
}

@ -1,65 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"fmt"
)
// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error
func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput, extensions ...extensionOptions) (output *CreateSignedUrlOutput, err error) {
	if input == nil {
		return nil, errors.New("CreateSignedUrlInput is nil")
	}
	// Copy the caller-supplied query parameters so the input map is not mutated.
	params := make(map[string]string, len(input.QueryParams))
	for k, v := range input.QueryParams {
		params[k] = v
	}
	if input.SubResource != "" {
		params[string(input.SubResource)] = ""
	}
	// Headers are single-valued on the input; expand them to http.Header shape.
	headers := make(map[string][]string, len(input.Headers))
	for k, v := range input.Headers {
		headers[k] = []string{v}
	}
	for _, ext := range extensions {
		extHeader, ok := ext.(extensionHeaders)
		if !ok {
			doLog(LEVEL_INFO, "Unsupported extensionOptions")
			continue
		}
		if extErr := extHeader(headers, obsClient.conf.signature == SignatureObs); extErr != nil {
			doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", extErr))
		}
	}
	// Default validity period is 300 seconds (five minutes).
	if input.Expires <= 0 {
		input.Expires = 300
	}
	requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires))
	if err != nil {
		return nil, err
	}
	return &CreateSignedUrlOutput{
		SignedUrl:                  requestURL,
		ActualSignedRequestHeaders: headers,
	}, nil
}

@ -1,116 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"fmt"
"strings"
"time"
)
// isSecurityToken injects the security token (when present) into the request
// parameters, under the OBS or AMZ header name depending on signature type.
func (obsClient ObsClient) isSecurityToken(params map[string]string, sh securityHolder) {
	if sh.securityToken == "" {
		return
	}
	tokenKey := HEADER_STS_TOKEN_AMZ
	if obsClient.conf.signature == SignatureObs {
		tokenKey = HEADER_STS_TOKEN_OBS
	}
	params[tokenKey] = sh.securityToken
}
// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput,
// and returns the CreateBrowserBasedSignatureOutput and error.
// It builds a POST-policy JSON document from the form parameters, base64-encodes
// it, and signs it with either the V4 scheme or HMAC-SHA1 depending on the
// client's configured signature type.
func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
	if input == nil {
		return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
	}
	// Copy caller-supplied form parameters so the input map is not mutated.
	params := make(map[string]string, len(input.FormParams))
	for key, value := range input.FormParams {
		params[key] = value
	}
	date := time.Now().UTC()
	shortDate := date.Format(SHORT_DATE_FORMAT)
	longDate := date.Format(LONG_DATE_FORMAT)
	sh := obsClient.getSecurity()
	credential, _ := getCredential(sh.ak, obsClient.conf.region, shortDate)
	// Default validity period is 300 seconds (five minutes).
	if input.Expires <= 0 {
		input.Expires = 300
	}
	expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
	if obsClient.conf.signature == SignatureV4 {
		params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
		params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
		params[PARAM_DATE_AMZ_CAMEL] = longDate
	}
	// Add the STS security token to the policy parameters when present.
	obsClient.isSecurityToken(params, sh)
	matchAnyBucket := true
	matchAnyKey := true
	// count reserves slice capacity for the fixed policy fragments below;
	// it shrinks when bucket/key conditions replace the wildcard ones.
	count := 5
	if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
		params["bucket"] = bucket
		matchAnyBucket = false
		count--
	}
	if key := strings.TrimSpace(input.Key); key != "" {
		params["key"] = key
		matchAnyKey = false
		count--
	}
	// Assemble the JSON policy by string concatenation; the exact bytes are
	// what gets base64-encoded and signed, so order and punctuation matter.
	originPolicySlice := make([]string, 0, len(params)+count)
	originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
	originPolicySlice = append(originPolicySlice, "\"conditions\":[")
	for key, value := range params {
		if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
			originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
		}
	}
	// Without an explicit bucket/key, allow any value via starts-with "".
	if matchAnyBucket {
		originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
	}
	if matchAnyKey {
		originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
	}
	originPolicySlice = append(originPolicySlice, "]}")
	originPolicy := strings.Join(originPolicySlice, "")
	policy := Base64Encode([]byte(originPolicy))
	var signature string
	if obsClient.conf.signature == SignatureV4 {
		signature = getSignature(policy, sh.sk, obsClient.conf.region, shortDate)
	} else {
		// V2/OBS signing: HMAC-SHA1 over the base64 policy.
		signature = Base64Encode(HmacSha1([]byte(sh.sk), []byte(policy)))
	}
	output = &CreateBrowserBasedSignatureOutput{
		OriginPolicy: originPolicy,
		Policy:       policy,
		Algorithm:    params[PARAM_ALGORITHM_AMZ_CAMEL],
		Credential:   params[PARAM_CREDENTIAL_AMZ_CAMEL],
		Date:         params[PARAM_DATE_AMZ_CAMEL],
		Signature:    signature,
	}
	return
}

@ -1,758 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"errors"
"io"
"net/http"
"os"
"strings"
)
// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers
func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
	result := &ListBucketsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
	result := &GetBucketStoragePolicyOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}
// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
	result := &ListObjectsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	// The bucket region arrives in a response header, not in the XML body.
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	if result.EncodingType == "url" {
		if err = decodeListObjectsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}

// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
	result := &ListVersionsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	// The bucket region arrives in a response header, not in the XML body.
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	if result.EncodingType == "url" {
		if err = decodeListVersionsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}

// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a
// specified bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
	result := &ListMultipartUploadsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	if result.EncodingType == "url" {
		if err = decodeListMultipartUploadsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}
// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
	result := &GetBucketQuotaOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers
func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers
func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
	result := &GetBucketMetadataOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	// HEAD responses carry everything in headers; lift them into the struct.
	ParseGetBucketMetadataOutput(result)
	return result, nil
}

// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
	result := &GetBucketStorageInfoOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
	result := &GetBucketLocationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
	result := &GetBucketAclOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
	result := &GetBucketPolicyOutput{}
	// Policy responses are raw JSON, not XML — hence the final false flag.
	if err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, false); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	return result, nil
}
// doSignedURLBaseModelAction issues a pre-signed request whose response carries
// no payload beyond the common BaseModel fields. It centralizes the
// "nil output on error" convention shared by every Set*/Delete* wrapper below.
func (obsClient ObsClient) doSignedURLBaseModelAction(action, method, signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (*BaseModel, error) {
	output := &BaseModel{}
	if err := obsClient.doHTTPWithSignedURL(action, method, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
	output = &GetBucketCorsOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil)
}

// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
	output = &GetBucketVersioningOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
	output = &GetBucketWebsiteConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil)
}

// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
	output = &GetBucketLoggingConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
	output = &GetBucketLifecycleConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil)
}

// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
	output = &GetBucketTaggingOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil)
}

// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	return obsClient.doSignedURLBaseModelAction("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data)
}

// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
	output = &GetBucketNotificationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
	result := &DeleteObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	ParseDeleteObjectOutput(result)
	return result, nil
}

// DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data
func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
	result := &DeleteObjectsOutput{}
	if err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	if result.EncodingType == "url" {
		if err = decodeDeleteObjectsOutput(result); err != nil {
			doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
			return nil, err
		}
	}
	return result, nil
}

// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
	result := &GetObjectAclOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	// The version id is reported via a response header, not the XML body.
	if versionID, ok := result.ResponseHeaders[HEADER_VERSION_ID]; ok {
		result.VersionId = versionID[0]
	}
	return result, nil
}

// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data
func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	return result, nil
}

// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
	result := &GetObjectMetadataOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	// HEAD responses carry everything in headers; lift them into the struct.
	ParseGetObjectMetadataOutput(result)
	return result, nil
}

// GetObjectWithSignedUrl downloads object with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
	result := &GetObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, result, true); err != nil {
		return nil, err
	}
	ParseGetObjectOutput(result)
	return result, nil
}

// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
	result := &PutObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, result, true); err != nil {
		return nil, err
	}
	ParsePutObjectOutput(result)
	return result, nil
}
// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path
func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
	var body io.Reader
	sourceFile = strings.TrimSpace(sourceFile)
	if sourceFile != "" {
		fd, openErr := os.Open(sourceFile)
		if openErr != nil {
			err = openErr
			return nil, err
		}
		defer func() {
			if closeErr := fd.Close(); closeErr != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", closeErr)
			}
		}()
		stat, statErr := fd.Stat()
		if statErr != nil {
			err = statErr
			return nil, err
		}
		wrapper := &fileReaderWrapper{filePath: sourceFile}
		wrapper.reader = fd
		// Content length defaults to the file size, but an explicit
		// Content-Length header (either casing) on the signed request wins.
		contentLength := stat.Size()
		if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
			contentLength = StringToInt64(value[0], -1)
		} else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
			contentLength = StringToInt64(value[0], -1)
		}
		if contentLength > stat.Size() {
			return nil, errors.New("ContentLength is larger than fileSize")
		}
		wrapper.totalCount = contentLength
		body = wrapper
	}
	result := &PutObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, body, result, true); err != nil {
		return nil, err
	}
	ParsePutObjectOutput(result)
	return result, nil
}
// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers
func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
output = &CopyObjectOutput{}
err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseCopyObjectOutput(output)
}
return
}
// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers
func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
	output = &InitiateMultipartUploadOutput{}
	if err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseInitiateMultipartUploadOutput(output)
	// URL-encoded responses must be decoded before being handed to the caller.
	if output.EncodingType == "url" {
		if err = decodeInitiateMultipartUploadOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}
// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID
// with the specified signed url and signed request headers and data
func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
	output = &UploadPartOutput{}
	if err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	ParseUploadPartOutput(output)
	return output, nil
}
// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
// with the specified signed url and signed request headers and data
func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
	output = &CompleteMultipartUploadOutput{}
	if err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	ParseCompleteMultipartUploadOutput(output)
	// URL-encoded responses must be decoded before being handed to the caller.
	if output.EncodingType == "url" {
		if err = decodeCompleteMultipartUploadOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}
// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
	output = &ListPartsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if output.EncodingType == "url" {
		if err = decodeListPartsOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}
// CopyPartWithSignedUrl copy a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
	output = &CopyPartOutput{}
	if err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseCopyPartOutput(output)
	return output, nil
}
// SetBucketRequestPaymentWithSignedUrl sets requester-pays setting for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// GetBucketRequestPaymentWithSignedUrl gets requester-pays setting of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
	output = &GetBucketRequestPaymentOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// SetBucketEncryptionWithSignedURL sets bucket encryption setting for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketEncryption", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// GetBucketEncryptionWithSignedURL gets bucket encryption setting of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *GetBucketEncryptionOutput, err error) {
	output = &GetBucketEncryptionOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketEncryption", HTTP_GET, signedURL, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// DeleteBucketEncryptionWithSignedURL deletes bucket encryption setting of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketEncryption", HTTP_DELETE, signedURL, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}
// AppendObjectWithSignedURL uploads an object to the specified bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) AppendObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *AppendObjectOutput, err error) {
	output = &AppendObjectOutput{}
	err = obsClient.doHTTPWithSignedURL("AppendObject", HTTP_POST, signedURL, actualSignedRequestHeaders, data, output, true)
	if err != nil {
		output = nil
	} else {
		// Unlike most operations, parsing the append response can itself fail;
		// a parse failure discards the output and is surfaced to the caller.
		if err = ParseAppendObjectOutput(output); err != nil {
			output = nil
		}
	}
	return
}
// ModifyObjectWithSignedURL modifies an object in the specified bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) ModifyObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *ModifyObjectOutput, err error) {
	output = &ModifyObjectOutput{}
	err = obsClient.doHTTPWithSignedURL("ModifyObject", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
	if err != nil {
		output = nil
	} else {
		ParseModifyObjectOutput(output)
	}
	return
}

@ -1,990 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//nolint:structcheck, unused
package obs
import (
"bytes"
"fmt"
"io"
"net/url"
"os"
"strconv"
"strings"
)
// IReadCloser defines interface with function: setReadCloser
type IReadCloser interface {
	setReadCloser(body io.ReadCloser)
}
// setReadCloser stores the raw response body stream on the output so the
// caller can read the object content from output.Body.
func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
	output.Body = body
}
func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) {
if isObs {
header = HEADER_PREFIX_OBS + header
headers[header] = headerValue
} else {
header = HEADER_PREFIX + header
headers[header] = headerValue
}
}
// setHeadersNext writes headerValue under the header name for OBS protocol
// requests, or under headerNext for compatible-protocol requests.
func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) {
	key := headerNext
	if isObs {
		key = header
	}
	headers[key] = headerValue
}
// IBaseModel defines interface for base response model
type IBaseModel interface {
	setStatusCode(statusCode int)
	setRequestID(requestID string)
	setResponseHeaders(responseHeaders map[string][]string)
}
// ISerializable defines interface with function: trans
type ISerializable interface {
	trans(isObs bool) (map[string]string, map[string][]string, interface{}, error)
}
// DefaultSerializable defines default serializable struct
type DefaultSerializable struct {
	params  map[string]string   // query parameters to send with the request
	headers map[string][]string // request headers to send
	data    interface{}         // request body payload
}
// trans returns the stored params, headers and data unchanged, ignoring isObs.
func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) {
	return input.params, input.headers, input.data, nil
}
// defaultSerializable is a shared serializer carrying no params, headers or data.
var defaultSerializable = &DefaultSerializable{}
// newSubResourceSerial builds a serializer that targets only the given
// sub-resource query parameter, with no headers and no body.
func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
	params := map[string]string{string(subResource): ""}
	return &DefaultSerializable{params: params, headers: nil, data: nil}
}
// trans targets the given sub-resource query parameter and converts input
// into a serialized body reader via ConvertRequestToIoReader.
func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(subResource): ""}
	data, err = ConvertRequestToIoReader(input)
	return
}
// setStatusCode records the HTTP status code of the response.
func (baseModel *BaseModel) setStatusCode(statusCode int) {
	baseModel.StatusCode = statusCode
}
// setRequestID records the server-assigned request ID of the response.
func (baseModel *BaseModel) setRequestID(requestID string) {
	baseModel.RequestId = requestID
}
// setResponseHeaders records the raw response headers.
func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
	baseModel.ResponseHeaders = responseHeaders
}
// trans serializes ListBucketsInput into request headers: the location query
// header (compatible protocol only) and the bucket-type header.
func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	// The location header is only meaningful on the compatible protocol.
	if input.QueryLocation && !isObs {
		setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs)
	}
	// Bucket type always uses the OBS-prefixed header (isObs forced true).
	if input.BucketType != "" {
		setHeaders(headers, HEADER_BUCKET_TYPE, []string{string(input.BucketType)}, true)
	}
	return
}
// prepareGrantHeaders copies each non-empty grant ID from the input into the
// matching grant header. The delivered-grant headers always use the OBS prefix.
func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
	if grantReadID := input.GrantReadId; grantReadID != "" {
		setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs)
	}
	if grantWriteID := input.GrantWriteId; grantWriteID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs)
	}
	if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" {
		setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs)
	}
	if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs)
	}
	if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs)
	}
	if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
		setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
	}
	if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
	}
}
// trans serializes CreateBucketInput into request headers and an optional
// CreateBucketConfiguration XML body carrying the bucket location.
func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	if storageClass := string(input.StorageClass); storageClass != "" {
		// Map OBS storage-class names to their compatible-protocol equivalents.
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
	}
	if epid := input.Epid; epid != "" {
		setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
	}
	if availableZone := input.AvailableZone; availableZone != "" {
		setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
	}
	input.prepareGrantHeaders(headers, isObs)
	if input.IsFSFileInterface {
		setHeaders(headers, headerFSFileInterface, []string{"Enabled"}, true)
	}
	// A non-blank location becomes the XML request body; the element name
	// differs between the OBS and compatible protocols.
	if location := strings.TrimSpace(input.Location); location != "" {
		input.Location = location
		xml := make([]string, 0, 3)
		xml = append(xml, "<CreateBucketConfiguration>")
		if isObs {
			xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
		} else {
			xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
		}
		xml = append(xml, "</CreateBucketConfiguration>")
		data = strings.Join(xml, "")
	}
	return
}
// trans serializes SetBucketStoragePolicyInput. The sub-resource, XML element
// and storage-class names all differ between the two protocols.
func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	xml := make([]string, 0, 1)
	if !isObs {
		// Compatible protocol: ?storagePolicy with mapped class names.
		storageClass := "STANDARD"
		if input.StorageClass == StorageClassWarm {
			storageClass = string(storageClassStandardIA)
		} else if input.StorageClass == StorageClassCold {
			storageClass = string(storageClassGlacier)
		}
		params = map[string]string{string(SubResourceStoragePolicy): ""}
		xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
	} else {
		// OBS protocol: ?storageClass, defaulting unknown classes to STANDARD.
		if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
			input.StorageClass = StorageClassStandard
		}
		params = map[string]string{string(SubResourceStorageClass): ""}
		xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
	}
	data = strings.Join(xml, "")
	return
}
// trans serializes the shared listing options (prefix, delimiter, max-keys,
// encoding-type) into query params, and CORS preflight fields into headers.
func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if input.Prefix != "" {
		params["prefix"] = input.Prefix
	}
	if input.Delimiter != "" {
		params["delimiter"] = input.Delimiter
	}
	if input.MaxKeys > 0 {
		params["max-keys"] = IntToString(input.MaxKeys)
	}
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	headers = make(map[string][]string)
	if origin := strings.TrimSpace(input.Origin); origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
	}
	if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
	}
	return
}
// trans serializes ListObjectsInput: the shared listing options from the
// embedded ListObjsInput plus the pagination marker.
func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ListObjsInput.trans(isObs); err != nil {
		return
	}
	if marker := input.Marker; marker != "" {
		params["marker"] = marker
	}
	return
}
// trans serializes ListVersionsInput: the shared listing options plus the
// versions sub-resource and the key/version-id pagination markers.
func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ListObjsInput.trans(isObs)
	if err != nil {
		return
	}
	params[string(SubResourceVersions)] = ""
	if input.KeyMarker != "" {
		params["key-marker"] = input.KeyMarker
	}
	if input.VersionIdMarker != "" {
		params["version-id-marker"] = input.VersionIdMarker
	}
	return
}
// trans serializes ListMultipartUploadsInput into query params against the
// uploads sub-resource, including filtering and pagination options.
func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceUploads): ""}
	if input.Prefix != "" {
		params["prefix"] = input.Prefix
	}
	if input.Delimiter != "" {
		params["delimiter"] = input.Delimiter
	}
	if input.MaxUploads > 0 {
		params["max-uploads"] = IntToString(input.MaxUploads)
	}
	if input.KeyMarker != "" {
		params["key-marker"] = input.KeyMarker
	}
	if input.UploadIdMarker != "" {
		params["upload-id-marker"] = input.UploadIdMarker
	}
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	return
}
// trans serializes SetBucketQuotaInput against the quota sub-resource.
func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceQuota, input)
}
// trans serializes SetBucketAclInput against the acl sub-resource: a canned
// ACL becomes a header; otherwise the access control policy becomes XML body.
func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	} else {
		// Conversion error is deliberately ignored here; an empty body results.
		data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs)
	}
	return
}
// trans serializes SetBucketPolicyInput: the raw policy JSON string is sent
// as the body against the policy sub-resource.
func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourcePolicy): ""}
	data = strings.NewReader(input.Policy)
	return
}
// trans serializes SetBucketCorsInput against the cors sub-resource and
// attaches the body's Content-MD5 header required by the service.
func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceCors): ""}
	data, md5, err := ConvertRequestToIoReaderV2(input)
	if err != nil {
		return
	}
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
// trans serializes SetBucketVersioningInput against the versioning sub-resource.
func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceVersioning, input)
}
// trans serializes SetBucketWebsiteConfigurationInput: the website
// configuration becomes an XML body against the website sub-resource.
func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceWebsite): ""}
	// Conversion error is deliberately ignored here; an empty body results.
	data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
	return
}
// trans serializes GetBucketMetadataInput: only the CORS preflight fields
// (Origin, Access-Control-Request-Headers) are sent, as headers.
func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	if origin := strings.TrimSpace(input.Origin); origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
	}
	if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
	}
	return
}
// trans serializes SetBucketLoggingConfigurationInput: the logging status
// becomes an XML body against the logging sub-resource.
func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceLogging): ""}
	// Conversion error is deliberately ignored here; an empty body results.
	data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs)
	return
}
// trans serializes SetBucketLifecycleConfigurationInput against the lifecycle
// sub-resource, attaching the body's Content-MD5 header.
func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceLifecycle): ""}
	data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs)
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
// trans serializes SetBucketEncryptionInput: the encryption configuration
// becomes an XML body against the encryption sub-resource.
func (input SetBucketEncryptionInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceEncryption): ""}
	// Conversion error is deliberately ignored here; an empty body results.
	data, _ = ConvertEncryptionConfigurationToXml(input.BucketEncryptionConfiguration, false, isObs)
	return
}
// trans serializes SetBucketTaggingInput against the tagging sub-resource,
// attaching the body's Content-MD5 header.
func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceTagging): ""}
	data, md5, err := ConvertRequestToIoReaderV2(input)
	if err != nil {
		return
	}
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
// trans serializes SetBucketNotificationInput: the notification configuration
// becomes an XML body against the notification sub-resource.
func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceNotification): ""}
	// Conversion error is deliberately ignored here; an empty body results.
	data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs)
	return
}
// trans serializes DeleteObjectInput: only an optional versionId query
// parameter scoping the delete to a specific object version.
func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{}
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}
// trans serializes DeleteObjectsInput against the delete sub-resource. When
// encoding-type is "url", keys are URL-escaped before being put into the XML
// body; the body's Content-MD5 header is attached.
func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceDelete): ""}
	if strings.ToLower(input.EncodingType) == "url" {
		// NOTE: this mutates input.Objects in place before XML conversion.
		for index, object := range input.Objects {
			input.Objects[index].Key = url.QueryEscape(object.Key)
		}
	}
	data, md5 := convertDeleteObjectsToXML(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
// trans serializes SetObjectAclInput against the acl sub-resource, optionally
// version-scoped: a canned ACL becomes a header, otherwise the access control
// policy becomes an XML body.
func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	} else {
		// Conversion error is deliberately ignored here; an empty body results.
		data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs)
	}
	return
}
// trans serializes GetObjectAclInput: the acl sub-resource plus an optional
// versionId query parameter.
func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}
// trans serializes RestoreObjectInput against the restore sub-resource,
// optionally version-scoped; the body format differs per protocol.
func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceRestore): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	if !isObs {
		data, err = ConvertRequestToIoReader(input)
	} else {
		data = ConverntObsRestoreToXml(input)
	}
	return
}
// GetEncryption gets the Encryption field value from SseKmsHeader
func (header SseKmsHeader) GetEncryption() string {
	switch {
	case header.Encryption != "":
		return header.Encryption
	case header.isObs:
		return DEFAULT_SSE_KMS_ENCRYPTION_OBS
	default:
		return DEFAULT_SSE_KMS_ENCRYPTION
	}
}
// GetKey gets the Key field value from SseKmsHeader
func (header SseKmsHeader) GetKey() string {
	return header.Key
}
// GetEncryption gets the Encryption field value from SseCHeader
func (header SseCHeader) GetEncryption() string {
	if header.Encryption == "" {
		return DEFAULT_SSE_C_ENCRYPTION
	}
	return header.Encryption
}
// GetKey gets the Key field value from SseCHeader
func (header SseCHeader) GetKey() string {
	return header.Key
}
// GetKeyMD5 gets the KeyMD5 field value from SseCHeader
func (header SseCHeader) GetKeyMD5() string {
	if header.KeyMD5 != "" {
		return header.KeyMD5
	}
	// No explicit MD5: derive it from the base64-encoded key; an undecodable
	// key yields an empty string.
	if ret, err := Base64Decode(header.GetKey()); err == nil {
		return Base64Md5(ret)
	}
	return ""
}
// setSseHeader writes server-side-encryption headers for either an SSE-C or
// an SSE-KMS header value. When sseCOnly is true, SSE-KMS values are ignored.
func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) {
	if sseHeader != nil {
		if sseCHeader, ok := sseHeader.(SseCHeader); ok {
			setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
			setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs)
			setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
		} else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok {
			// Propagate the protocol flag so GetEncryption picks the right default.
			sseKmsHeader.isObs = isObs
			setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs)
			if sseKmsHeader.GetKey() != "" {
				setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs)
			}
		}
	}
}
// trans serializes GetObjectMetadataInput: optional versionId param, CORS
// preflight headers, and SSE-C headers (SSE-KMS is not applicable here).
func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)
	if input.Origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin}
	}
	if input.RequestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader}
	}
	setSseHeader(headers, input.SseHeader, true, isObs)
	return
}
// prepareContentHeaders copies the non-empty Content-* fields of the input
// into the corresponding camel-case request headers.
func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) {
	if input.ContentDisposition != "" {
		headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
	}
	if input.ContentEncoding != "" {
		headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
	}
	if input.ContentLanguage != "" {
		headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}
}
// prepareStorageClass writes the storage-class header, mapping OBS class
// names to compatible-protocol equivalents when isObs is false.
func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) {
	if storageClass := string(input.StorageClass); storageClass != "" {
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
	}
}
// trans serializes SetObjectMetadataInput against the metadata sub-resource,
// optionally version-scoped. The metadata directive defaults to REPLACE_NEW
// when the caller did not supply one.
func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// The previous `params = make(map[string]string)` was immediately
	// overwritten by the literal below; the dead assignment is removed.
	params = map[string]string{string(SubResourceMetadata): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)
	if directive := string(input.MetadataDirective); directive != "" {
		// Use the already-converted local instead of re-converting the field.
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
	} else {
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs)
	}
	if input.CacheControl != "" {
		headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
	}
	input.prepareContentHeaders(headers)
	if input.Expires != "" {
		headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
	}
	if input.WebsiteRedirectLocation != "" {
		setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
	}
	input.prepareStorageClass(headers, isObs)
	// Trimmed user-metadata keys are sent under the vendor meta prefix.
	// (Ranging over a nil map is a no-op, so no nil guard is needed.)
	for key, value := range input.Metadata {
		key = strings.TrimSpace(key)
		setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
	}
	return
}
// prepareResponseParams copies the non-empty response-header overrides
// (cache-control, content-*, expires) into the request query parameters.
func (input GetObjectInput) prepareResponseParams(params map[string]string) {
	if input.ResponseCacheControl != "" {
		params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl
	}
	if input.ResponseContentDisposition != "" {
		params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition
	}
	if input.ResponseContentEncoding != "" {
		params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding
	}
	if input.ResponseContentLanguage != "" {
		params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage
	}
	if input.ResponseContentType != "" {
		params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType
	}
	if input.ResponseExpires != "" {
		params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires
	}
}
// trans serializes GetObjectInput: the embedded metadata options plus
// response-header overrides, image processing, a byte range, and the
// conditional (If-*) request headers.
func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.GetObjectMetadataInput.trans(isObs)
	if err != nil {
		return
	}
	input.prepareResponseParams(params)
	if input.ImageProcess != "" {
		params[PARAM_IMAGE_PROCESS] = input.ImageProcess
	}
	// A Range header is only sent when the range is non-empty and ascending.
	if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
		headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
	}
	if input.IfMatch != "" {
		headers[HEADER_IF_MATCH] = []string{input.IfMatch}
	}
	if input.IfNoneMatch != "" {
		headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
	}
	if !input.IfModifiedSince.IsZero() {
		headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
	}
	if !input.IfUnmodifiedSince.IsZero() {
		headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
	}
	return
}
// prepareGrantHeaders copies each non-empty grant ID into the matching grant
// header; object grants always use the OBS-prefixed header names.
func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) {
	if GrantReadID := input.GrantReadId; GrantReadID != "" {
		setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true)
	}
	if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" {
		setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true)
	}
	if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true)
	}
	if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true)
	}
}
// trans serializes the options shared by object write operations: ACL,
// grants, storage class, website redirect, SSE headers, expiry, and
// user metadata.
func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	input.prepareGrantHeaders(headers)
	if storageClass := string(input.StorageClass); storageClass != "" {
		// Map OBS storage-class names to their compatible-protocol equivalents.
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
	}
	if input.WebsiteRedirectLocation != "" {
		setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
	}
	setSseHeader(headers, input.SseHeader, false, isObs)
	// The expires header always uses the OBS-prefixed form (isObs forced true).
	if input.Expires != 0 {
		setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true)
	}
	if input.Metadata != nil {
		for key, value := range input.Metadata {
			key = strings.TrimSpace(key)
			setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
		}
	}
	return
}
// trans serializes PutObjectBasicInput: the shared object-operation options
// plus the body's MD5, length, type and encoding headers.
func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ObjectOperationInput.trans(isObs)
	if err != nil {
		return
	}
	if input.ContentMD5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
	}
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}
	if input.ContentEncoding != "" {
		headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
	}
	return
}
// trans serializes PutObjectInput: the embedded basic options plus the
// request body when one was supplied.
func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.PutObjectBasicInput.trans(isObs); err != nil {
		return
	}
	if body := input.Body; body != nil {
		data = body
	}
	return
}
// trans serializes AppendObjectInput: the embedded basic options plus the
// append sub-resource, the write position, and the request body.
func (input AppendObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.PutObjectBasicInput.trans(isObs)
	if err != nil {
		return
	}
	params[string(SubResourceAppend)] = ""
	params["position"] = strconv.FormatInt(input.Position, 10)
	if input.Body != nil {
		data = input.Body
	}
	return
}
// trans serializes ModifyObjectInput: the modify sub-resource, the write
// position, an optional Content-Length header, and the request body.
func (input ModifyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	params[string(SubResourceModify)] = ""
	params["position"] = strconv.FormatInt(input.Position, 10)
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if input.Body != nil {
		data = input.Body
	}
	return
}
// prepareReplaceHeaders sets the replacement object-attribute headers; each
// one is sent only when the caller supplied a non-empty value.
func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) {
	for _, pair := range []struct{ header, value string }{
		{HEADER_CACHE_CONTROL, input.CacheControl},
		{HEADER_CONTENT_DISPOSITION, input.ContentDisposition},
		{HEADER_CONTENT_ENCODING, input.ContentEncoding},
		{HEADER_CONTENT_LANGUAGE, input.ContentLanguage},
		{HEADER_CONTENT_TYPE, input.ContentType},
		{HEADER_EXPIRES, input.Expires},
	} {
		if pair.value != "" {
			headers[pair.header] = []string{pair.value}
		}
	}
}
// prepareCopySourceHeaders sets the conditional copy-source headers (ETag and
// modification-time preconditions) when the caller supplied them.
func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) {
	if match := input.CopySourceIfMatch; match != "" {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{match}, isObs)
	}
	if noneMatch := input.CopySourceIfNoneMatch; noneMatch != "" {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{noneMatch}, isObs)
	}
	if since := input.CopySourceIfModifiedSince; !since.IsZero() {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(since)}, isObs)
	}
	if since := input.CopySourceIfUnmodifiedSince; !since.IsZero() {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(since)}, isObs)
	}
}
// trans converts CopyObjectInput into request params, headers and body data.
// It builds the copy-source header from bucket/key(/versionId), then layers on
// the metadata directive, replacement, conditional and SSE-C source headers.
func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ObjectOperationInput.trans(isObs)
	if err != nil {
		return
	}
	var copySource string
	if input.CopySourceVersionId != "" {
		copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
	} else {
		copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
	}
	setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
	if directive := string(input.MetadataDirective); directive != "" {
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
	}
	// Replacement attribute headers only apply when metadata is replaced.
	if input.MetadataDirective == ReplaceMetadata {
		input.prepareReplaceHeaders(headers)
	}
	input.prepareCopySourceHeaders(headers, isObs)
	// SSE-C headers needed to decrypt the copy source (only SseCHeader applies here).
	if input.SourceSseHeader != nil {
		if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
		}
	}
	if input.SuccessActionRedirect != "" {
		headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect}
	}
	return
}
// trans converts AbortMultipartUploadInput into request params.
func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string, 1)
	params["uploadId"] = input.UploadId
	return
}
// trans converts InitiateMultipartUploadInput into request params, headers and
// body data, targeting the "uploads" sub-resource.
func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ObjectOperationInput.trans(isObs); err != nil {
		return
	}
	if contentType := input.ContentType; contentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{contentType}
	}
	params[string(SubResourceUploads)] = ""
	if encoding := input.EncodingType; encoding != "" {
		params["encoding-type"] = encoding
	}
	return
}
// trans converts UploadPartInput into request params, headers and body data.
func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		"uploadId":   input.UploadId,
		"partNumber": IntToString(input.PartNumber),
	}
	headers = make(map[string][]string)
	setSseHeader(headers, input.SseHeader, true, isObs)
	if md5 := input.ContentMD5; md5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{md5}
	}
	if body := input.Body; body != nil {
		data = body
	}
	return
}
// trans converts CompleteMultipartUploadInput into request params and the XML
// request body listing the parts to be combined.
func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId}
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	// Propagate the conversion error instead of silently discarding it; the
	// function already has an err return that every caller checks.
	data, err = ConvertCompleteMultipartUploadInputToXml(input, false)
	return
}
// trans converts ListPartsInput into request params; paging options are sent
// only when set to a positive / non-empty value.
func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId}
	if maxParts := input.MaxParts; maxParts > 0 {
		params["max-parts"] = IntToString(maxParts)
	}
	if marker := input.PartNumberMarker; marker > 0 {
		params["part-number-marker"] = IntToString(marker)
	}
	if encoding := input.EncodingType; encoding != "" {
		params["encoding-type"] = encoding
	}
	return
}
// trans converts CopyPartInput into request params and headers for an
// upload-part-copy request (copying a server-side byte range into a part).
func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
	headers = make(map[string][]string, 1)
	var copySource string
	if input.CopySourceVersionId != "" {
		copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
	} else {
		copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
	}
	setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
	// Only send a range header for a valid, non-empty byte range.
	if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
		setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs)
	}
	setSseHeader(headers, input.SseHeader, true, isObs)
	// SSE-C headers needed to decrypt the copy source.
	if input.SourceSseHeader != nil {
		if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
		}
	}
	return
}
// trans converts HeadObjectInput into request params.
func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}
// trans delegates to the package-level trans helper for the requestPayment
// sub-resource, which derives params/headers/body from the input.
func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceRequestPayment, input)
}
// partSlice implements sort.Interface so parts can be ordered by ascending
// part number.
type partSlice []Part
// Len returns the number of parts.
func (parts partSlice) Len() int {
	return len(parts)
}
// Less orders parts by ascending PartNumber.
func (parts partSlice) Less(i, j int) bool {
	return parts[i].PartNumber < parts[j].PartNumber
}
// Swap exchanges two parts in place.
func (parts partSlice) Swap(i, j int) {
	parts[i], parts[j] = parts[j], parts[i]
}
// readerWrapper wraps an io.Reader and caps the number of bytes that may be
// read from it (see Read for the totalCount semantics).
type readerWrapper struct {
	reader io.Reader // underlying data source
	mark int64 // saved position, presumably used to rewind on retry — TODO(review) confirm with callers
	totalCount int64 // byte budget: 0 => immediate EOF, negative => unlimited
	readedCount int64 // bytes delivered so far
}
// seek repositions the wrapped reader when it is one of the seekable types the
// SDK hands out (strings.Reader, bytes.Reader or os.File); for any other
// reader it is a no-op that reports the requested offset.
func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
	switch r := rw.reader.(type) {
	case *strings.Reader:
		return r.Seek(offset, whence)
	case *bytes.Reader:
		return r.Seek(offset, whence)
	case *os.File:
		return r.Seek(offset, whence)
	}
	return offset, nil
}
// Read implements io.Reader, delivering at most totalCount bytes overall.
// A totalCount of 0 yields immediate EOF; a negative totalCount imposes no
// limit and simply delegates to the wrapped reader.
func (rw *readerWrapper) Read(p []byte) (n int, err error) {
	if rw.totalCount == 0 {
		return 0, io.EOF
	}
	if rw.totalCount > 0 {
		n, err = rw.reader.Read(p)
		readedOnce := int64(n)
		remainCount := rw.totalCount - rw.readedCount
		if remainCount > readedOnce {
			rw.readedCount += readedOnce
			return n, err
		}
		// Budget exhausted: truncate this read to the remaining quota and
		// report EOF even if the underlying reader has more data.
		rw.readedCount += remainCount
		return int(remainCount), io.EOF
	}
	return rw.reader.Read(p)
}
// fileReaderWrapper is a readerWrapper that also remembers the path of the
// local file it reads — presumably so the source can be reopened on retry;
// TODO(review) confirm with callers.
type fileReaderWrapper struct {
	readerWrapper
	filePath string // path of the local source file
}
// trans converts SetBucketFetchPolicyInput into request headers and a JSON
// body, marking the request with the OEF header.
func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// Plain map index suffices: a miss yields "", and the comma-ok blank
	// identifier is redundant (staticcheck S1005).
	contentType := mimeTypes["json"]
	headers = make(map[string][]string, 2)
	headers[HEADER_CONTENT_TYPE] = []string{contentType}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	data, err = convertFetchPolicyToJSON(input)
	return
}
// trans builds the OEF marker header for reading the bucket fetch policy.
func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = map[string][]string{}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}
// trans builds the OEF marker header for deleting the bucket fetch policy.
func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = map[string][]string{}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}
// trans converts SetBucketFetchJobInput into request headers and a JSON body,
// marking the request with the OEF header.
func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// Plain map index suffices: a miss yields "", and the comma-ok blank
	// identifier is redundant (staticcheck S1005).
	contentType := mimeTypes["json"]
	headers = make(map[string][]string, 2)
	headers[HEADER_CONTENT_TYPE] = []string{contentType}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	data, err = convertFetchJobToJSON(input)
	return
}
// trans builds the OEF marker header for reading a bucket fetch job.
func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = map[string][]string{}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}
// trans converts RenameFileInput into rename sub-resource params and headers.
func (input RenameFileInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		string(SubResourceRename): "",
		"name":                    input.NewObjectKey,
	}
	headers = make(map[string][]string)
	if payer := string(input.RequestPayer); payer != "" {
		headers[HEADER_REQUEST_PAYER] = []string{payer}
	}
	return
}
// trans converts RenameFolderInput into rename sub-resource params and headers.
func (input RenameFolderInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		string(SubResourceRename): "",
		"name":                    input.NewObjectKey,
	}
	headers = make(map[string][]string)
	if payer := string(input.RequestPayer); payer != "" {
		headers[HEADER_REQUEST_PAYER] = []string{payer}
	}
	return
}

@ -1,874 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"sync/atomic"
"syscall"
)
// errAbort is the sentinel result a part task returns when another task has
// already aborted the whole transfer.
var errAbort = errors.New("AbortError")
// FileStatus defines the upload file properties recorded in a checkpoint so a
// resumed run can detect that the source file changed in the meantime.
type FileStatus struct {
	XMLName xml.Name `xml:"FileInfo"`
	LastModified int64 `xml:"LastModified"` // Unix seconds of the file's mtime
	Size int64 `xml:"Size"` // file size in bytes
}
// UploadPartInfo defines the upload part properties
type UploadPartInfo struct {
	XMLName xml.Name `xml:"UploadPart"`
	PartNumber int `xml:"PartNumber"` // 1-based part index
	Etag string `xml:"Etag"` // ETag returned by the server for this part
	PartSize int64 `xml:"PartSize"` // size of this part in bytes
	Offset int64 `xml:"Offset"` // byte offset of this part within the source file
	IsCompleted bool `xml:"IsCompleted"` // true once the part has been uploaded
}
// UploadCheckpoint defines the upload checkpoint file properties
type UploadCheckpoint struct {
	XMLName xml.Name `xml:"UploadFileCheckpoint"`
	Bucket string `xml:"Bucket"` // target bucket
	Key string `xml:"Key"` // target object key
	UploadId string `xml:"UploadId,omitempty"` // multipart upload ID being resumed
	UploadFile string `xml:"FileUrl"` // local source file path
	FileInfo FileStatus `xml:"FileInfo"` // source file size/mtime at upload start
	UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` // per-part progress
}
// isValid reports whether this checkpoint record still matches the requested
// upload: same bucket/key/file, unchanged file size and mtime, and a usable
// upload ID. Invalid records are logged so the caller can clear them.
func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool {
	switch {
	case ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.")
		return false
	case ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix():
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.")
		return false
	case ufc.UploadId == "":
		doLog(LEVEL_INFO, "UploadId is invalid. clear the record.")
		return false
	}
	return true
}
// uploadPartTask is one unit of work for the concurrent part-upload pool.
type uploadPartTask struct {
	UploadPartInput
	obsClient *ObsClient // client used to issue the UploadPart request
	abort *int32 // shared flag (0/1) set when the whole transfer must stop
	extensions []extensionOptions // per-request extension options
	enableCheckpoint bool // when true, failures stay resumable instead of aborting
}
// Run uploads one part. It short-circuits with errAbort when another task has
// already aborted the transfer; on an empty ETag or a 4xx server error it
// flags the shared abort (unless checkpointing allows a later resume).
func (task *uploadPartTask) Run() interface{} {
	if atomic.LoadInt32(task.abort) == 1 {
		return errAbort
	}
	// Build a fresh input so the request carries only this part's fields.
	input := &UploadPartInput{}
	input.Bucket = task.Bucket
	input.Key = task.Key
	input.PartNumber = task.PartNumber
	input.UploadId = task.UploadId
	input.SseHeader = task.SseHeader
	input.SourceFile = task.SourceFile
	input.Offset = task.Offset
	input.PartSize = task.PartSize
	extensions := task.extensions
	var output *UploadPartOutput
	var err error
	if len(extensions) != 0 {
		output, err = task.obsClient.UploadPart(input, extensions...)
	} else {
		output, err = task.obsClient.UploadPart(input)
	}
	if err == nil {
		if output.ETag == "" {
			// An empty ETag makes the part unusable for completing the upload.
			doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber)
			if !task.enableCheckpoint {
				atomic.CompareAndSwapInt32(task.abort, 0, 1)
				doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
			}
			return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber)
		}
		return output
	} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
		// Client-side (4xx) errors will not succeed on retry: abort the transfer.
		atomic.CompareAndSwapInt32(task.abort, 0, 1)
		doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
	}
	return err
}
// loadCheckpointFile reads checkpointFile and unmarshals its XML content into
// result. An empty file is treated as "no checkpoint yet" and returns nil
// without touching result.
func loadCheckpointFile(checkpointFile string, result interface{}) error {
	content, err := ioutil.ReadFile(checkpointFile)
	if err != nil {
		return err
	}
	if len(content) == 0 {
		return nil
	}
	return xml.Unmarshal(content, result)
}
// updateCheckpointFile serializes fc as XML and rewrites the checkpoint file.
func updateCheckpointFile(fc interface{}, checkpointFilePath string) error {
	content, merr := xml.Marshal(fc)
	if merr != nil {
		return merr
	}
	return ioutil.WriteFile(checkpointFilePath, content, 0666)
}
// getCheckpointFile loads and validates the upload checkpoint record.
// It returns needCheckpoint=true when a fresh multipart upload must be
// prepared (missing/unreadable/invalid record) and false when the loaded
// record can be resumed. An invalid record aborts the stale server-side
// upload (best effort) and removes the checkpoint file.
func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) {
	checkpointFilePath := input.CheckpointFile
	checkpointFileStat, err := os.Stat(checkpointFilePath)
	if err != nil {
		// doLog is printf-style (see the other call sites); no need to
		// pre-format with fmt.Sprintf.
		doLog(LEVEL_DEBUG, "Stat checkpoint file failed with error: [%v].", err)
		return true, nil
	}
	if checkpointFileStat.IsDir() {
		doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
		return false, errors.New("checkpoint file can not be a folder")
	}
	err = loadCheckpointFile(checkpointFilePath, ufc)
	if err != nil {
		doLog(LEVEL_WARN, "Load checkpoint file failed with error: [%v].", err)
		return true, nil
	} else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) {
		// Stale record: try to abort the old server-side upload before discarding it.
		if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" {
			_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId)
			}
		}
		_err := os.Remove(checkpointFilePath)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to remove checkpoint file with error: [%v].", _err)
		}
	} else {
		// Record matches the requested upload: resume it.
		return false, nil
	}
	return true, nil
}
// prepareUpload initiates a new multipart upload and fills the checkpoint
// record: identity, source-file stats, the new upload ID and the part layout.
func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error {
	initiateInput := &InitiateMultipartUploadInput{}
	initiateInput.ObjectOperationInput = input.ObjectOperationInput
	initiateInput.ContentType = input.ContentType
	initiateInput.EncodingType = input.EncodingType
	var output *InitiateMultipartUploadOutput
	var err error
	if len(extensions) != 0 {
		output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...)
	} else {
		output, err = obsClient.InitiateMultipartUpload(initiateInput)
	}
	if err != nil {
		return err
	}
	ufc.Bucket = input.Bucket
	ufc.Key = input.Key
	ufc.UploadFile = input.UploadFile
	// Remember size/mtime so a resumed run can detect source-file changes.
	ufc.FileInfo = FileStatus{}
	ufc.FileInfo.Size = uploadFileStat.Size()
	ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix()
	ufc.UploadId = output.UploadId
	err = sliceFile(input.PartSize, ufc)
	return err
}
// sliceFile splits the upload file into parts of at most partSize bytes and
// records them in ufc.UploadParts. The part size is grown when needed so the
// part count never exceeds 10000; an empty file still yields one (empty) part.
func sliceFile(partSize int64, ufc *UploadCheckpoint) error {
	fileSize := ufc.FileInfo.Size
	cnt := fileSize / partSize
	if cnt >= 10000 {
		// Too many parts: switch to ceil(fileSize/10000) as the part size.
		partSize = fileSize / 10000
		if fileSize%10000 != 0 {
			partSize++
		}
		cnt = fileSize / partSize
	}
	if fileSize%partSize != 0 {
		cnt++
	}
	if partSize > MAX_PART_SIZE {
		doLog(LEVEL_ERROR, "The source upload file is too large")
		return fmt.Errorf("The source upload file is too large")
	}
	if cnt == 0 {
		// Zero-byte file: a single empty part keeps the multipart flow uniform.
		uploadPart := UploadPartInfo{}
		uploadPart.PartNumber = 1
		ufc.UploadParts = []UploadPartInfo{uploadPart}
	} else {
		uploadParts := make([]UploadPartInfo, 0, cnt)
		var i int64
		for i = 0; i < cnt; i++ {
			uploadPart := UploadPartInfo{}
			uploadPart.PartNumber = int(i) + 1
			uploadPart.PartSize = partSize
			uploadPart.Offset = i * partSize
			uploadParts = append(uploadParts, uploadPart)
		}
		// The final part carries the remainder when the size does not divide evenly.
		if value := fileSize % partSize; value != 0 {
			uploadParts[cnt-1].PartSize = value
		}
		ufc.UploadParts = uploadParts
	}
	return nil
}
// abortTask aborts the server-side multipart upload identified by uploadID.
func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error {
	input := &AbortMultipartUploadInput{}
	input.Bucket = bucket
	input.Key = key
	input.UploadId = uploadID
	var err error
	if len(extensions) != 0 {
		_, err = obsClient.AbortMultipartUpload(input, extensions...)
	} else {
		_, err = obsClient.AbortMultipartUpload(input)
	}
	return err
}
// handleUploadFileResult post-processes the part-upload phase: on failure it
// aborts the server-side upload unless checkpointing is enabled (so the
// transfer can be resumed later), and passes the original error through.
func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error {
	if uploadPartError == nil {
		return nil
	}
	if !enableCheckpoint {
		if _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions); _err != nil {
			doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
		}
	}
	return uploadPartError
}
// completeParts finishes the multipart upload from the checkpoint's part list.
// On success the checkpoint file is removed (when enabled); on failure without
// checkpointing the server-side upload is aborted so nothing is left behind.
func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, encodingType string, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	completeInput := &CompleteMultipartUploadInput{}
	completeInput.Bucket = ufc.Bucket
	completeInput.Key = ufc.Key
	completeInput.UploadId = ufc.UploadId
	completeInput.EncodingType = encodingType
	parts := make([]Part, 0, len(ufc.UploadParts))
	for _, uploadPart := range ufc.UploadParts {
		part := Part{}
		part.PartNumber = uploadPart.PartNumber
		part.ETag = uploadPart.Etag
		parts = append(parts, part)
	}
	completeInput.Parts = parts
	var completeOutput *CompleteMultipartUploadOutput
	if len(extensions) != 0 {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...)
	} else {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput)
	}
	if err == nil {
		// Upload finished: the checkpoint file is no longer needed.
		if enableCheckpoint {
			_err := os.Remove(checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err)
			}
		}
		return completeOutput, err
	}
	if !enableCheckpoint {
		_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
		}
	}
	return completeOutput, err
}
// resumeUpload drives an (optionally checkpointed) multipart upload of a local
// file: load/validate the checkpoint, initiate the upload and slice the file
// when needed, upload the parts concurrently, then complete the upload.
func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	uploadFileStat, err := os.Stat(input.UploadFile)
	if err != nil {
		doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err))
		return nil, err
	}
	if uploadFileStat.IsDir() {
		doLog(LEVEL_ERROR, "UploadFile can not be a folder.")
		return nil, errors.New("uploadFile can not be a folder")
	}
	ufc := &UploadCheckpoint{}
	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}
	}
	if needCheckpoint {
		err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}
		if enableCheckpoint {
			err = updateCheckpointFile(ufc, checkpointFilePath)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err)
				// Persisting failed: abort the just-created upload so nothing leaks.
				_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions)
				if _err != nil {
					doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
				}
				return nil, err
			}
		}
	}
	uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions)
	err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions)
	if err != nil {
		return nil, err
	}
	completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, input.EncodingType, extensions)
	return completeOutput, err
}
// handleUploadTaskResult records a finished part under lock: on success the
// ETag and completed flag are stored (and the checkpoint persisted when
// enabled); a non-abort failure becomes the returned error, while errAbort is
// swallowed so only the root-cause error reaches the caller.
func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) {
	if uploadPartOutput, ok := result.(*UploadPartOutput); ok {
		lock.Lock()
		defer lock.Unlock()
		ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag
		ufc.UploadParts[partNum-1].IsCompleted = true
		if enableCheckpoint {
			_err := updateCheckpointFile(ufc, checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
			}
		}
	} else if result != errAbort {
		if _err, ok := result.(error); ok {
			err = _err
		}
	}
	return
}
// uploadPartConcurrent uploads all incomplete parts through a routine pool of
// input.TaskNum workers. Only the first error is kept (errFlag guards the
// single Store into uploadPartError); a shared abort flag lets a failing task
// stop the submission of further parts.
func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	var uploadPartError atomic.Value
	var errFlag int32
	var abort int32
	lock := new(sync.Mutex)
	for _, uploadPart := range ufc.UploadParts {
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		if uploadPart.IsCompleted {
			continue
		}
		// task is declared per iteration, so each closure captures its own copy.
		task := uploadPartTask{
			UploadPartInput: UploadPartInput{
				Bucket: ufc.Bucket,
				Key: ufc.Key,
				PartNumber: uploadPart.PartNumber,
				UploadId: ufc.UploadId,
				SseHeader: input.SseHeader,
				SourceFile: input.UploadFile,
				Offset: uploadPart.Offset,
				PartSize: uploadPart.PartSize,
			},
			obsClient: &obsClient,
			abort: &abort,
			extensions: extensions,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				uploadPartError.Store(err)
			}
			return nil
		})
	}
	pool.ShutDown()
	if err, ok := uploadPartError.Load().(error); ok {
		return err
	}
	return nil
}
// ObjectInfo defines download object info
type ObjectInfo struct {
	XMLName xml.Name `xml:"ObjectInfo"`
	LastModified int64 `xml:"LastModified"` // Unix seconds of the object's last modification
	Size int64 `xml:"Size"` // object size in bytes
	ETag string `xml:"ETag"` // object ETag at checkpoint time
}
// TempFileInfo defines temp download file properties
type TempFileInfo struct {
	XMLName xml.Name `xml:"TempFileInfo"`
	TempFileUrl string `xml:"TempFileUrl"` // path of the pre-allocated temp file
	Size int64 `xml:"Size"` // expected temp file size in bytes
}
// DownloadPartInfo defines download part properties
type DownloadPartInfo struct {
	XMLName xml.Name `xml:"DownloadPart"`
	PartNumber int64 `xml:"PartNumber"` // 1-based part index
	RangeEnd int64 `xml:"RangeEnd"` // inclusive end byte of this part's range
	Offset int64 `xml:"Offset"` // start byte of this part's range
	IsCompleted bool `xml:"IsCompleted"` // true once the part has been written to the temp file
}
// DownloadCheckpoint defines download checkpoint file properties
type DownloadCheckpoint struct {
	XMLName xml.Name `xml:"DownloadFileCheckpoint"`
	Bucket string `xml:"Bucket"` // source bucket
	Key string `xml:"Key"` // source object key
	VersionId string `xml:"VersionId,omitempty"` // optional object version
	DownloadFile string `xml:"FileUrl"` // final local destination path
	ObjectInfo ObjectInfo `xml:"ObjectInfo"` // object stats at checkpoint time
	TempFileInfo TempFileInfo `xml:"TempFileInfo"` // pre-allocated temp file record
	DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` // per-part progress
}
// isValid reports whether the download checkpoint record still matches the
// request, the current object metadata and the on-disk temp file.
func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool {
	switch {
	case dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.")
		return false
	case dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.")
		return false
	case dfc.TempFileInfo.Size != output.ContentLength:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.")
		return false
	}
	stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl)
	if err != nil || stat.Size() != dfc.ObjectInfo.Size {
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. clear the record.")
		return false
	}
	return true
}
// downloadPartTask is one unit of work for the concurrent ranged-GET pool.
type downloadPartTask struct {
	GetObjectInput
	obsClient *ObsClient // client used to issue the ranged GetObject request
	extensions []extensionOptions // per-request extension options
	abort *int32 // shared flag (0/1) set when the whole transfer must stop
	partNumber int64 // 1-based part index (used in log messages)
	tempFileURL string // temp file the part is written into
	enableCheckpoint bool // when true, failures stay resumable instead of aborting
}
// Run downloads one ranged part into the temp file. It short-circuits with
// errAbort when the transfer was already aborted, and flags the shared abort
// on 4xx server errors or (when not checkpointing) on local write failures.
func (task *downloadPartTask) Run() interface{} {
	if atomic.LoadInt32(task.abort) == 1 {
		return errAbort
	}
	// Build a fresh input carrying the preconditions and this part's range.
	getObjectInput := &GetObjectInput{}
	getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput
	getObjectInput.IfMatch = task.IfMatch
	getObjectInput.IfNoneMatch = task.IfNoneMatch
	getObjectInput.IfModifiedSince = task.IfModifiedSince
	getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince
	getObjectInput.RangeStart = task.RangeStart
	getObjectInput.RangeEnd = task.RangeEnd
	var output *GetObjectOutput
	var err error
	if len(task.extensions) != 0 {
		output, err = task.obsClient.GetObject(getObjectInput, task.extensions...)
	} else {
		output, err = task.obsClient.GetObject(getObjectInput)
	}
	if err == nil {
		defer func() {
			errMsg := output.Body.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close response body.")
			}
		}()
		_err := updateDownloadFile(task.tempFileURL, task.RangeStart, output)
		if _err != nil {
			if !task.enableCheckpoint {
				atomic.CompareAndSwapInt32(task.abort, 0, 1)
				doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
			}
			return _err
		}
		return output
	} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
		// Client-side (4xx) errors will not succeed on retry: abort the transfer.
		atomic.CompareAndSwapInt32(task.abort, 0, 1)
		doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
	}
	return err
}
// getObjectInfo fetches the object's metadata (size, ETag, mtime) that the
// download planning relies on.
func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) {
	if len(extensions) == 0 {
		return obsClient.GetObjectMetadata(&input.GetObjectMetadataInput)
	}
	return obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...)
}
// getDownloadCheckpointFile loads and validates the download checkpoint
// record. It returns needCheckpoint=true when a fresh download must be
// prepared (missing/unreadable/invalid record) and false when the loaded
// record can be resumed. An invalid record removes both the stale temp file
// and the checkpoint file.
func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) {
	checkpointFilePath := input.CheckpointFile
	checkpointFileStat, err := os.Stat(checkpointFilePath)
	if err != nil {
		// doLog is printf-style (see the other call sites); no need to
		// pre-format with fmt.Sprintf.
		doLog(LEVEL_DEBUG, "Stat checkpoint file failed with error: [%v].", err)
		return true, nil
	}
	if checkpointFileStat.IsDir() {
		doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
		return false, errors.New("checkpoint file can not be a folder")
	}
	err = loadCheckpointFile(checkpointFilePath, dfc)
	if err != nil {
		doLog(LEVEL_WARN, "Load checkpoint file failed with error: [%v].", err)
		return true, nil
	} else if !dfc.isValid(input, output) {
		// Stale record: discard the partially written temp file and the record.
		if dfc.TempFileInfo.TempFileUrl != "" {
			_err := os.Remove(dfc.TempFileInfo.TempFileUrl)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
			}
		}
		_err := os.Remove(checkpointFilePath)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err)
		}
	} else {
		// Record matches the requested download: resume it.
		return false, nil
	}
	return true, nil
}
// sliceObject splits the object into download parts of partSize bytes each
// and records them in dfc.DownloadParts. A zero-size object still yields one
// (empty) part so the download flow stays uniform.
func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) {
	cnt := objectSize / partSize
	if objectSize%partSize > 0 {
		cnt++
	}
	if cnt == 0 {
		downloadPart := DownloadPartInfo{}
		downloadPart.PartNumber = 1
		dfc.DownloadParts = []DownloadPartInfo{downloadPart}
	} else {
		downloadParts := make([]DownloadPartInfo, 0, cnt)
		var i int64
		for i = 0; i < cnt; i++ {
			downloadPart := DownloadPartInfo{}
			downloadPart.PartNumber = i + 1
			downloadPart.Offset = i * partSize
			downloadPart.RangeEnd = (i+1)*partSize - 1
			downloadParts = append(downloadParts, downloadPart)
		}
		dfc.DownloadParts = downloadParts
		// Clamp the last part's range end to the object's true end byte.
		// NOTE(review): assumes dfc.ObjectInfo.Size == objectSize, as set by
		// the caller — verify if callers ever diverge.
		if value := objectSize % partSize; value > 0 {
			dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1
		}
	}
}
// createFile pre-allocates the temp download file to fileSize bytes using
// syscall.Open + Ftruncate (platform-dependent; prepareTempFile has a
// portable fallback when this path fails).
func createFile(tempFileURL string, fileSize int64) error {
	fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL)
		return err
	}
	defer func() {
		errMsg := syscall.Close(fd)
		if errMsg != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
		}
	}()
	err = syscall.Ftruncate(fd, fileSize)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to create file with error [%v].", err)
	}
	return err
}
// prepareTempFile creates the parent directory if needed and pre-allocates
// the temp download file. If the fast syscall path (createFile) fails it
// falls back to os.OpenFile and forces the size by writing one byte at the end.
func prepareTempFile(tempFileURL string, fileSize int64) error {
	parentDir := filepath.Dir(tempFileURL)
	stat, err := os.Stat(parentDir)
	if err != nil {
		doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err)
		_err := os.MkdirAll(parentDir, os.ModePerm)
		if _err != nil {
			doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err)
			return _err
		}
	} else if !stat.IsDir() {
		doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir)
		return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir)
	}
	err = createFile(tempFileURL, fileSize)
	if err == nil {
		return nil
	}
	// Fallback: portable pre-allocation via os.OpenFile + WriteAt.
	fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL)
		return err
	}
	defer func() {
		errMsg := fd.Close()
		if errMsg != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
		}
	}()
	if fileSize > 0 {
		// Writing one byte at fileSize-1 extends the file to the full size.
		_, err = fd.WriteAt([]byte("a"), fileSize-1)
		if err != nil {
			doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err)
			return err
		}
	}
	return nil
}
// handleDownloadFileResult cleans up after the download phase: when it failed
// and checkpointing is disabled the temp file is removed, and the original
// error (if any) is passed through.
func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error {
	if downloadFileError == nil {
		return nil
	}
	if !enableCheckpoint {
		if _err := os.Remove(tempFileURL); _err != nil {
			doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
		}
	}
	return downloadFileError
}
// resumeDownload drives an (optionally checkpointed) ranged download: fetch
// the object metadata, load/validate or build the checkpoint and temp file,
// download the parts concurrently, then rename the temp file into place.
func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) {
	getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions)
	if err != nil {
		return nil, err
	}
	objectSize := getObjectmetaOutput.ContentLength
	partSize := input.PartSize
	dfc := &DownloadCheckpoint{}
	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput)
		if err != nil {
			return nil, err
		}
	}
	if needCheckpoint {
		// Fresh download: record identity + object stats and pre-allocate the temp file.
		dfc.Bucket = input.Bucket
		dfc.Key = input.Key
		dfc.VersionId = input.VersionId
		dfc.DownloadFile = input.DownloadFile
		dfc.ObjectInfo = ObjectInfo{}
		dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix()
		dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength
		dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag
		dfc.TempFileInfo = TempFileInfo{}
		dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp"
		dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength
		sliceObject(objectSize, partSize, dfc)
		_err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size)
		if _err != nil {
			return nil, _err
		}
		if enableCheckpoint {
			_err := updateCheckpointFile(dfc, checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err)
				// Persisting failed: discard the temp file so nothing stale remains.
				_errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl)
				if _errMsg != nil {
					doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg)
				}
				return nil, _err
			}
		}
	}
	downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions)
	err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError)
	if err != nil {
		return nil, err
	}
	err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err)
		return nil, err
	}
	if enableCheckpoint {
		err = os.Remove(checkpointFilePath)
		if err != nil {
			doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err)
		}
	}
	return getObjectmetaOutput, nil
}
// updateDownloadFile writes the body of a ranged download response into the
// temp file at filePath, starting at byte offset rangeStart, using buffered
// writes. Short writes and read failures (other than EOF) are treated as errors.
func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error {
	file, err := os.OpenFile(filePath, os.O_WRONLY, 0666)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath)
		return err
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", closeErr)
		}
	}()
	if _, err = file.Seek(rangeStart, 0); err != nil {
		doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err)
		return err
	}
	writer := bufio.NewWriterSize(file, 65536)
	chunk := make([]byte, 8192)
	for {
		n, readErr := output.Body.Read(chunk)
		if n > 0 {
			written, writeErr := writer.Write(chunk[:n])
			if writeErr != nil {
				doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", writeErr)
				return writeErr
			}
			if written != n {
				doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, n, written)
				return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, n, written)
			}
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr)
			return readErr
		}
	}
	if err = writer.Flush(); err != nil {
		doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err)
		return err
	}
	return nil
}
// handleDownloadTaskResult records the completion of one download part under
// the shared lock and, when the task failed, surfaces its error; an aborted
// task (errAbort) is deliberately not reported as an error.
func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) {
	if _, isOutput := result.(*GetObjectOutput); isOutput {
		lock.Lock()
		defer lock.Unlock()
		dfc.DownloadParts[partNum-1].IsCompleted = true
		if enableCheckpoint {
			if updateErr := updateCheckpointFile(dfc, checkpointFile); updateErr != nil {
				doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", updateErr)
			}
		}
		return
	}
	if result == errAbort {
		return
	}
	if taskErr, isErr := result.(error); isErr {
		err = taskErr
	}
	return
}
// downloadFileConcurrent fans the incomplete parts in dfc out to a routine
// pool of input.TaskNum workers and returns the first error any part task
// reported; parts already completed (from a checkpoint) are skipped.
func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	// downloadPartError holds the first error seen; errFlag guards the store
	// so it happens exactly once. abort is set by tasks to stop submission.
	var downloadPartError atomic.Value
	var errFlag int32
	var abort int32
	lock := new(sync.Mutex)
	for _, downloadPart := range dfc.DownloadParts {
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		// skip parts finished during a previous (checkpointed) attempt
		if downloadPart.IsCompleted {
			continue
		}
		// task is a fresh variable per iteration, so capturing it in the
		// closure below is safe.
		task := downloadPartTask{
			GetObjectInput: GetObjectInput{
				GetObjectMetadataInput: input.GetObjectMetadataInput,
				IfMatch:                input.IfMatch,
				IfNoneMatch:            input.IfNoneMatch,
				IfUnmodifiedSince:      input.IfUnmodifiedSince,
				IfModifiedSince:        input.IfModifiedSince,
				RangeStart:             downloadPart.Offset,
				RangeEnd:               downloadPart.RangeEnd,
			},
			obsClient:        &obsClient,
			extensions:       extensions,
			abort:            &abort,
			partNumber:       downloadPart.PartNumber,
			tempFileURL:      dfc.TempFileInfo.TempFileUrl,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
			// record only the first failure; later errors are dropped
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				downloadPartError.Store(err)
			}
			return nil
		})
	}
	// wait for all submitted tasks to finish before inspecting the result
	pool.ShutDown()
	if err, ok := downloadPartError.Load().(error); ok {
		return err
	}
	return nil
}

@ -1,551 +0,0 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package obs
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)
// regex matches exactly one rune in the CJK unified ideograph range
// U+4E00..U+9FA5 (used by UrlEncode's chinese-only escaping).
var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")
// ipRegex matches a dotted-quad IPv4 address with each octet in 0-255.
var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")
// v4AuthRegex captures the Credential and SignedHeaders fields from a v4
// Authorization header value.
var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")
// regionRegex captures the segment that follows the numeric date in a
// slash-separated credential scope ("<ak>/<date>/<region>/...").
var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")
// StringContains returns a copy of src with every occurrence of subStr
// replaced by subTranscoding.
func StringContains(src string, subStr string, subTranscoding string) string {
	// strings.ReplaceAll is the idiomatic form of strings.Replace(..., -1).
	return strings.ReplaceAll(src, subStr, subTranscoding)
}
// XmlTranscoding escapes the five XML special characters in src
// (&, <, >, ', ") with their entity references. '&' is escaped first so
// the entities produced by the later replacements are not double-escaped.
func XmlTranscoding(src string) string {
	// Use strings.ReplaceAll directly instead of routing through the
	// StringContains wrapper; order matches the original sequence.
	escaped := strings.ReplaceAll(src, "&", "&amp;")
	escaped = strings.ReplaceAll(escaped, "<", "&lt;")
	escaped = strings.ReplaceAll(escaped, ">", "&gt;")
	escaped = strings.ReplaceAll(escaped, "'", "&apos;")
	escaped = strings.ReplaceAll(escaped, "\"", "&quot;")
	return escaped
}
// StringToInt parses value as a decimal int, returning def when parsing fails.
func StringToInt(value string, def int) int {
	if parsed, err := strconv.Atoi(value); err == nil {
		return parsed
	}
	return def
}
// StringToInt64 parses value as a decimal int64, returning def when parsing fails.
func StringToInt64(value string, def int64) int64 {
	if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
		return parsed
	}
	return def
}
// IntToString formats value as its decimal string representation.
func IntToString(value int) string {
	return strconv.FormatInt(int64(value), 10)
}
// Int64ToString formats value as its decimal string representation.
func Int64ToString(value int64) string {
	return string(strconv.AppendInt(nil, value, 10))
}
// GetCurrentTimestamp returns the current Unix time in milliseconds.
func GetCurrentTimestamp() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
// FormatUtcNow renders the current UTC time using the given reference layout.
func FormatUtcNow(format string) string {
	now := time.Now().UTC()
	return now.Format(format)
}
// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
func FormatUtcToRfc1123(t time.Time) string {
ret := t.UTC().Format(time.RFC1123)
return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
}
// Md5 returns the 16-byte MD5 digest of value.
func Md5(value []byte) []byte {
	// md5.Sum cannot fail, unlike the previous hash.Hash.Write form whose
	// (always-nil) error had to be checked and logged.
	sum := md5.Sum(value)
	return sum[:]
}
// HmacSha1 computes the HMAC-SHA1 digest of value keyed with key.
func HmacSha1(key, value []byte) []byte {
	hasher := hmac.New(sha1.New, key)
	if _, err := hasher.Write(value); err != nil {
		doLog(LEVEL_WARN, "HmacSha1 failed to write")
	}
	return hasher.Sum(nil)
}
// HmacSha256 computes the HMAC-SHA256 digest of value keyed with key.
func HmacSha256(key, value []byte) []byte {
	hasher := hmac.New(sha256.New, key)
	if _, err := hasher.Write(value); err != nil {
		doLog(LEVEL_WARN, "HmacSha256 failed to write")
	}
	return hasher.Sum(nil)
}
// Base64Encode returns the standard base64 encoding of value.
func Base64Encode(value []byte) string {
	encoder := base64.StdEncoding
	return encoder.EncodeToString(value)
}
// Base64Decode decodes a standard base64 string into its raw bytes.
func Base64Decode(value string) ([]byte, error) {
	decoder := base64.StdEncoding
	return decoder.DecodeString(value)
}
// HexMd5 returns the md5 value of input in hexadecimal format
func HexMd5(value []byte) string {
return Hex(Md5(value))
}
// Base64Md5 returns the md5 value of input with Base64Encode
func Base64Md5(value []byte) string {
return Base64Encode(Md5(value))
}
// Sha256Hash returns the 32-byte SHA-256 digest of value.
func Sha256Hash(value []byte) []byte {
	// sha256.Sum256 cannot fail, unlike the previous hash.Hash.Write form
	// whose (always-nil) error had to be checked and logged.
	sum := sha256.Sum256(value)
	return sum[:]
}
// ParseXml unmarshals XML bytes into result; empty input is a silent no-op.
func ParseXml(value []byte, result interface{}) error {
	if len(value) > 0 {
		return xml.Unmarshal(value, result)
	}
	return nil
}
// parseJSON unmarshals JSON bytes into result; empty input is a silent no-op.
func parseJSON(value []byte, result interface{}) error {
	if len(value) > 0 {
		return json.Unmarshal(value, result)
	}
	return nil
}
// TransToXml marshals value to XML; a nil value yields an empty byte slice.
func TransToXml(value interface{}) ([]byte, error) {
	if value != nil {
		return xml.Marshal(value)
	}
	return []byte{}, nil
}
// Hex returns the lowercase hexadecimal encoding of value.
func Hex(value []byte) string {
	dst := make([]byte, hex.EncodedLen(len(value)))
	hex.Encode(dst, value)
	return string(dst)
}
// HexSha256 returns the Sha256Hash value of input in hexadecimal format
func HexSha256(value []byte) string {
return Hex(Sha256Hash(value))
}
// UrlDecode reverses URL query escaping ("%AB" sequences and '+') in value,
// returning "" together with the error when decoding fails.
func UrlDecode(value string) (string, error) {
	decoded, err := url.QueryUnescape(value)
	if err != nil {
		return "", err
	}
	return decoded, nil
}
// UrlDecodeWithoutError is UrlDecode that swallows failures: the error is
// logged (when error logging is enabled) and "" is returned instead.
func UrlDecodeWithoutError(value string) string {
	decoded, err := UrlDecode(value)
	if err != nil {
		if isErrorLogEnabled() {
			doLog(LEVEL_ERROR, "Url decode error")
		}
		return ""
	}
	return decoded
}
// IsIP reports whether value is a dotted-quad IPv4 address
// (four octets, each 0-255); IPv6 forms are not matched.
func IsIP(value string) bool {
	return ipRegex.MatchString(value)
}
// UrlEncode query-escapes value. When chineseOnly is set, only runes
// matching the package-level CJK regex are escaped and every other
// character is passed through unchanged; otherwise the whole string
// is query-escaped.
func UrlEncode(value string, chineseOnly bool) string {
	if !chineseOnly {
		return url.QueryEscape(value)
	}
	var builder strings.Builder
	for _, r := range value {
		piece := string(r)
		if regex.MatchString(piece) {
			piece = url.QueryEscape(piece)
		}
		builder.WriteString(piece)
	}
	return builder.String()
}
// copyHeaders returns a deep copy of m with every header name lowercased;
// a nil input yields an empty, non-nil map.
func copyHeaders(m map[string][]string) (ret map[string][]string) {
	if m == nil {
		return make(map[string][]string)
	}
	ret = make(map[string][]string, len(m))
	for name, values := range m {
		copied := make([]string, len(values))
		copy(copied, values)
		ret[strings.ToLower(name)] = copied
	}
	return
}
// parseHeaders inspects the received Authorization header to decide which
// signature protocol was used ("v2" or "v4"); for v4 it also extracts the
// region and the signed-header list from the Credential scope.
func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) {
	signature = "v2"
	auth, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]
	if !ok || len(auth) == 0 {
		return
	}
	switch {
	case strings.HasPrefix(auth[0], V4_HASH_PREFIX):
		signature = "v4"
		matches := v4AuthRegex.FindStringSubmatch(auth[0])
		if len(matches) >= 3 {
			region = matches[1]
			if scoped := regionRegex.FindStringSubmatch(region); len(scoped) >= 2 {
				region = scoped[1]
			}
			signedHeaders = matches[2]
		}
	case strings.HasPrefix(auth[0], V2_HASH_PREFIX):
		signature = "v2"
	}
	return
}
// getTemporaryKeys lists the query parameter names that mark a request as
// using a temporary (pre-signed) signature, in both casings.
func getTemporaryKeys() []string {
	keys := []string{
		"Signature",
		"signature",
		"X-Amz-Signature",
		"x-amz-signature",
	}
	return keys
}
// getIsObs reports whether the request uses OBS-style signing: any query
// parameter (temporary auth) or header (header auth) carrying the AWS
// prefix marks the request as non-OBS.
func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool {
	if isTemporary {
		for _, query := range querys {
			if strings.HasPrefix(strings.ToLower(query), HEADER_PREFIX) || strings.HasPrefix(query, HEADER_ACCESSS_KEY_AMZ) {
				return false
			}
		}
		return true
	}
	for name := range headers {
		if strings.HasPrefix(strings.ToLower(name), HEADER_PREFIX) {
			return false
		}
	}
	return true
}
// isPathStyle reports whether the received Host header does NOT begin with
// "<bucket>.", i.e. the request addresses the bucket by path rather than
// by virtual host.
func isPathStyle(headers map[string][]string, bucketName string) bool {
	host, ok := headers[HEADER_HOST]
	return ok && len(host) > 0 && !strings.HasPrefix(host[0], bucketName+".")
}
// GetV2Authorization v2 Authorization
func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
if strings.HasPrefix(queryURL, "?") {
queryURL = queryURL[1:]
}
method = strings.ToUpper(method)
querys := strings.Split(queryURL, "&")
querysResult := make([]string, 0)
for _, value := range querys {
if value != "=" && len(value) != 0 {
querysResult = append(querysResult, value)
}
}
params := make(map[string]string)
for _, value := range querysResult {
kv := strings.Split(value, "=")
length := len(kv)
if length == 1 {
key := UrlDecodeWithoutError(kv[0])
params[key] = ""
} else if length >= 2 {
key := UrlDecodeWithoutError(kv[0])
vals := make([]string, 0, length-1)
for i := 1; i < length; i++ {
val := UrlDecodeWithoutError(kv[i])
vals = append(vals, val)
}
params[key] = strings.Join(vals, "=")
}
}
headers = copyHeaders(headers)
pathStyle := isPathStyle(headers, bucketName)
conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
pathStyle: pathStyle}
conf.signature = SignatureObs
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true)
v2HashPrefix := OBS_HASH_PREFIX
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
return
}
// getQuerysResult filters empty and bare-"=" entries out of raw query segments.
func getQuerysResult(querys []string) []string {
	result := make([]string, 0)
	for _, query := range querys {
		if len(query) > 0 && query != "=" {
			result = append(result, query)
		}
	}
	return result
}
// getParams decodes "key=value" query segments into a map; a segment
// containing several '=' keeps everything after the first as the value,
// with each piece URL-decoded individually and re-joined by '='.
func getParams(querysResult []string) map[string]string {
	params := make(map[string]string)
	for _, segment := range querysResult {
		parts := strings.Split(segment, "=")
		if len(parts) == 1 {
			params[UrlDecodeWithoutError(parts[0])] = ""
			continue
		}
		if len(parts) >= 2 {
			decoded := make([]string, 0, len(parts)-1)
			for _, part := range parts[1:] {
				decoded = append(decoded, UrlDecodeWithoutError(part))
			}
			params[UrlDecodeWithoutError(parts[0])] = strings.Join(decoded, "=")
		}
	}
	return params
}
// getTemporaryAndSignature reports whether params carry a pre-signed
// signature key and which protocol ("v2" or "v4") that key belongs to.
func getTemporaryAndSignature(params map[string]string) (bool, string) {
	for _, key := range getTemporaryKeys() {
		if _, found := params[key]; !found {
			continue
		}
		version := "v2"
		if strings.ToLower(key) == "x-amz-signature" {
			version = "v4"
		}
		return true, version
	}
	return false, "v2"
}
// GetAuthorization Authorization
//
// Rebuilds the authorization material for the described request: it detects
// whether the request is temporary (pre-signed) or header-signed and which
// protocol (v2, v4, OBS) was used, then delegates to the matching signer.
func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
	if strings.HasPrefix(queryURL, "?") {
		queryURL = queryURL[1:]
	}
	method = strings.ToUpper(method)
	querys := strings.Split(queryURL, "&")
	querysResult := getQuerysResult(querys)
	params := getParams(querysResult)
	// a signature carried in the query string marks a temporary request
	isTemporary, signature := getTemporaryAndSignature(params)
	isObs := getIsObs(isTemporary, querysResult, headers)
	headers = copyHeaders(headers)
	// path-style addressing when Host does not start with "<bucket>."
	pathStyle := false
	if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") {
		pathStyle = true
	}
	conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
		urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
		pathStyle: pathStyle}
	if isTemporary {
		return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs)
	}
	signature, region, signedHeaders := parseHeaders(headers)
	if signature == "v4" {
		conf.signature = SignatureV4
		requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		parsedRequestURL, _err := url.Parse(requestURL)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to parse requestURL")
			return nil
		}
		// re-sign only the headers the original request declared as signed
		headerKeys := strings.Split(signedHeaders, ";")
		_headers := make(map[string][]string, len(headerKeys))
		for _, headerKey := range headerKeys {
			_headers[headerKey] = headers[headerKey]
		}
		ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers)
		ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
	} else if signature == "v2" {
		if isObs {
			conf.signature = SignatureObs
		} else {
			conf.signature = SignatureV2
		}
		_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
		v2HashPrefix := V2_HASH_PREFIX
		if isObs {
			v2HashPrefix = OBS_HASH_PREFIX
		}
		ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
	}
	return
}
// getTemporaryAuthorization recomputes the pre-signed (query string)
// authorization parameters for a temporary request, supporting both the v4
// (X-Amz-*) and v2/OBS (Signature/AWSAccessKeyId/Expires) schemes.
func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string,
	headers map[string][]string, isObs bool) (ret map[string]string) {
	if signature == "v4" {
		conf.signature = SignatureV4
		// each X-Amz-* parameter may arrive in CamelCase or lowercase form
		longDate, ok := params[PARAM_DATE_AMZ_CAMEL]
		if !ok {
			longDate = params[HEADER_DATE_AMZ]
		}
		// NOTE(review): slicing assumes the date parameter is present and at
		// least 8 chars (the yyyymmdd prefix); absent dates would panic here.
		shortDate := longDate[:8]
		credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL]
		if !ok {
			credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)]
		}
		// extract the region from the decoded credential scope
		_credential := UrlDecodeWithoutError(credential)
		regions := regionRegex.FindStringSubmatch(_credential)
		var region string
		if len(regions) >= 2 {
			region = regions[1]
		}
		_, scope := getCredential(ak, region, shortDate)
		expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL]
		if !ok {
			expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)]
		}
		signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL]
		if !ok {
			signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)]
		}
		algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL]
		if !ok {
			algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)]
		}
		// the old signature must not take part in the new string-to-sign
		if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok {
			delete(params, PARAM_SIGNATURE_AMZ_CAMEL)
		} else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok {
			delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL))
		}
		ret = make(map[string]string, 6)
		ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm
		ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
		ret[PARAM_DATE_AMZ_CAMEL] = longDate
		ret[PARAM_EXPIRES_AMZ_CAMEL] = expires
		ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders
		requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		parsedRequestURL, _err := url.Parse(requestURL)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to parse requestUrl")
			return nil
		}
		stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers)
		ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false)
	} else if signature == "v2" {
		if isObs {
			conf.signature = SignatureObs
		} else {
			conf.signature = SignatureV2
		}
		_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		expires, ok := params["Expires"]
		if !ok {
			expires = params["expires"]
		}
		// v2 pre-signing signs the expiry timestamp in place of the Date header
		headers[HEADER_DATE_CAMEL] = []string{expires}
		stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
		ret = make(map[string]string, 3)
		ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false)
		ret["AWSAccessKeyId"] = UrlEncode(ak, false)
		ret["Expires"] = UrlEncode(expires, false)
	}
	return
}

@ -1,26 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
# IDE project files
.idea

@ -1,24 +0,0 @@
Copyright (c) 2014, 辣椒面
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,122 +0,0 @@
### Note from current maintainers:
A currently maintained fork of this project has been migrated to https://github.com/go-co-op/gocron
Disclaimer: we (the maintainers) tried, with no luck, to get in contact with Jason (the repository owner) in order to add new maintainers or leave the project within an organization. Unfortunately, he hasn't replied for months now (March, 2020).
So, we decided to move the project to a new repository (as stated above), in order to keep the evolution of the project coming from as many people as possible. Feel free to reach over!
## goCron: A Golang Job Scheduling Package.
This package is currently looking for new maintainers (cause @jasonlvhit is in [ICU](https://github.com/996icu/996.ICU)). Please message @jasonlvhit if you are interested.
[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](http://godoc.org/github.com/jasonlvhit/gocron)
[![Go Report Card](https://goreportcard.com/badge/github.com/jasonlvhit/gocron)](https://goreportcard.com/report/github.com/jasonlvhit/gocron)
goCron is a Golang job scheduling package which lets you run Go functions periodically at pre-determined interval using a simple, human-friendly syntax.
goCron is a Golang implementation of Ruby module [clockwork](https://github.com/tomykaira/clockwork) and Python job scheduling package [schedule](https://github.com/dbader/schedule), and personally, this package is my first Golang program, just for fun and practice.
See also these two great articles:
- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/)
- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/)
If you want to chat, you can find us at Slack! [<img src="https://img.shields.io/badge/gophers-gocron-brightgreen?logo=slack">](https://gophers.slack.com/archives/CQ7T0T1FW)
Back to this package, you could just use this simple API as below, to run a cron scheduler.
```go
package main
import (
"fmt"
"time"
"github.com/jasonlvhit/gocron"
)
func task() {
fmt.Println("I am running task.")
}
func taskWithParams(a int, b string) {
fmt.Println(a, b)
}
func main() {
// Do jobs without params
gocron.Every(1).Second().Do(task)
gocron.Every(2).Seconds().Do(task)
gocron.Every(1).Minute().Do(task)
gocron.Every(2).Minutes().Do(task)
gocron.Every(1).Hour().Do(task)
gocron.Every(2).Hours().Do(task)
gocron.Every(1).Day().Do(task)
gocron.Every(2).Days().Do(task)
gocron.Every(1).Week().Do(task)
gocron.Every(2).Weeks().Do(task)
// Do jobs with params
gocron.Every(1).Second().Do(taskWithParams, 1, "hello")
// Do jobs on specific weekday
gocron.Every(1).Monday().Do(task)
gocron.Every(1).Thursday().Do(task)
// Do a job at a specific time - 'hour:min:sec' - seconds optional
gocron.Every(1).Day().At("10:30").Do(task)
gocron.Every(1).Monday().At("18:30").Do(task)
gocron.Every(1).Tuesday().At("18:30:59").Do(task)
// Begin job immediately upon start
gocron.Every(1).Hour().From(gocron.NextTick()).Do(task)
// Begin job at a specific date/time
t := time.Date(2019, time.November, 10, 15, 0, 0, 0, time.Local)
gocron.Every(1).Hour().From(&t).Do(task)
// NextRun gets the next running time
_, time := gocron.NextRun()
fmt.Println(time)
// Remove a specific job
gocron.Remove(task)
// Clear all scheduled jobs
gocron.Clear()
// Start all the pending jobs
<- gocron.Start()
// also, you can create a new scheduler
// to run two schedulers concurrently
s := gocron.NewScheduler()
s.Every(3).Seconds().Do(task)
<- s.Start()
}
```
and full test cases and [document](http://godoc.org/github.com/jasonlvhit/gocron) will be coming soon (help is wanted! If you want to contribute, pull requests are welcome).
If you need to prevent a job from running at the same time from multiple cron instances (like running a cron app from multiple servers),
you can provide a [Locker implementation](example/lock.go) and lock the required jobs.
```go
gocron.SetLocker(lockerImplementation)
gocron.Every(1).Hour().Lock().Do(task)
```
Once again, thanks to the great works of Ruby clockwork and Python schedule package. BSD license is used, see the file License for detail.
Looking to contribute? Try to follow these guidelines:
* Use issues for everything
* For a small change, just send a PR!
* For bigger changes, please open an issue for discussion before sending a PR.
* PRs should have: tests, documentation and examples (if it makes sense)
* You can also contribute by:
* Reporting issues
* Suggesting new features or enhancements
* Improving/fixing documentation
Have fun!

@ -1,126 +0,0 @@
// Package gocron : A Golang Job Scheduling Package.
//
// An in-process scheduler for periodic jobs that uses the builder pattern
// for configuration. Schedule lets you run Golang functions periodically
// at pre-determined intervals using a simple, human-friendly syntax.
//
// Inspired by the Ruby module clockwork <https://github.com/tomykaira/clockwork>
// and
// Python package schedule <https://github.com/dbader/schedule>
//
// See also
// http://adam.heroku.com/past/2010/4/13/rethinking_cron/
// http://adam.heroku.com/past/2010/6/30/replace_cron_with_clockwork/
//
// Copyright 2014 Jason Lyu. jasonlvhit@gmail.com .
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocron
import (
"crypto/sha256"
"fmt"
"reflect"
"runtime"
"strconv"
"strings"
"time"
)
// Locker provides a method to lock jobs from running
// at the same time on multiple instances of gocron.
// You can provide any locker implementation you wish.
type Locker interface {
	// Lock attempts to take the lock named key and reports whether it
	// was acquired.
	Lock(key string) (bool, error)
	// Unlock releases the lock named key.
	Unlock(key string) error
}
// timeUnit enumerates the interval units a Job can be scheduled with.
type timeUnit int
// MAXJOBNUM max number of jobs, hack it if you need.
const MAXJOBNUM = 10000
//go:generate stringer -type=timeUnit
const (
	seconds timeUnit = iota + 1
	minutes
	hours
	days
	weeks
)
var (
	loc    = time.Local // Time location, default set by the time.Local (*time.Location)
	locker Locker       // optional package-wide Locker, set via SetLocker
)
// ChangeLoc changes the default time location, updating both the
// package-level loc and the default scheduler.
func ChangeLoc(newLocation *time.Location) {
	loc = newLocation
	defaultScheduler.ChangeLoc(newLocation)
}
// SetLocker sets the package-wide locker implementation used by jobs
// scheduled with Lock().
func SetLocker(l Locker) {
	locker = l
}
// callJobFuncWithParams invokes jobFunc via reflection with the given
// params, failing when the argument count does not match the function arity.
func callJobFuncWithParams(jobFunc interface{}, params []interface{}) ([]reflect.Value, error) {
	fn := reflect.ValueOf(jobFunc)
	if fn.Type().NumIn() != len(params) {
		return nil, ErrParamsNotAdapted
	}
	args := make([]reflect.Value, len(params))
	for i, param := range params {
		args[i] = reflect.ValueOf(param)
	}
	return fn.Call(args), nil
}
// for given function fn, get the name of function.
// The runtime symbol table is used, so the result is fully qualified
// (e.g. "main.task").
func getFunctionName(fn interface{}) string {
	return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
}
// getFunctionKey derives a stable lock key from a function name by hashing
// it with SHA-256 and hex-encoding the digest.
func getFunctionKey(funcName string) string {
	// sha256.Sum256 cannot fail; the previous hash.Hash.Write form silently
	// discarded its (always-nil) error.
	sum := sha256.Sum256([]byte(funcName))
	return fmt.Sprintf("%x", sum[:])
}
// Jobs returns the list of Jobs registered on the package-level
// defaultScheduler.
func Jobs() []*Job {
	return defaultScheduler.Jobs()
}
// formatTime parses "HH:MM" or "HH:MM:SS" into its components, validating
// that each field is within range; seconds default to 0 when omitted.
func formatTime(t string) (hour, min, sec int, err error) {
	parts := strings.Split(t, ":")
	if len(parts) != 2 && len(parts) != 3 {
		return 0, 0, 0, ErrTimeFormat
	}
	hour, err = strconv.Atoi(parts[0])
	if err != nil {
		return 0, 0, 0, err
	}
	min, err = strconv.Atoi(parts[1])
	if err != nil {
		return 0, 0, 0, err
	}
	if len(parts) == 3 {
		sec, err = strconv.Atoi(parts[2])
		if err != nil {
			return 0, 0, 0, err
		}
	}
	if hour < 0 || hour > 23 || min < 0 || min > 59 || sec < 0 || sec > 59 {
		return 0, 0, 0, ErrTimeFormat
	}
	return hour, min, sec, nil
}
// NextTick returns a pointer to a time one second from now, suitable for
// starting a job on the scheduler's next tick via From.
func NextTick() *time.Time {
	next := time.Now().Add(time.Second)
	return &next
}

@ -1,365 +0,0 @@
package gocron
import (
"errors"
"fmt"
"log"
"reflect"
"time"
)
// Sentinel errors returned by job configuration and scheduling.
var (
	// ErrTimeFormat indicates an unparseable At() time string.
	ErrTimeFormat = errors.New("time format error")
	// ErrParamsNotAdapted indicates Do was given the wrong number of arguments.
	ErrParamsNotAdapted = errors.New("the number of params is not adapted")
	// ErrNotAFunction indicates a non-function value was scheduled.
	// (typo fix: "be schedule" -> "be scheduled")
	ErrNotAFunction = errors.New("only functions can be scheduled into the job queue")
	// ErrPeriodNotSpecified indicates a job was scheduled without a time unit.
	ErrPeriodNotSpecified = errors.New("unspecified job period")
	// ErrParameterCannotBeNil indicates a nil argument, which reflection
	// cannot handle. (typo fix: "paramaters" -> "parameters")
	ErrParameterCannotBeNil = errors.New("nil parameters cannot be used with reflection")
)
// Job struct keeping information about job
type Job struct {
	interval uint64                   // pause interval * unit between runs
	jobFunc  string                   // the job jobFunc to run, func[jobFunc]
	unit     timeUnit                 // time units, e.g. 'minutes', 'hours'...
	atTime   time.Duration            // optional time (offset from midnight) at which this job runs
	err      error                    // error related to job
	loc      *time.Location           // optional timezone that the atTime is in
	lastRun  time.Time                // datetime of last run
	nextRun  time.Time                // datetime of next run
	startDay time.Weekday             // Specific day of the week to start on
	funcs    map[string]interface{}   // Map for the function task store
	fparams  map[string][]interface{} // Map for function and params of function
	lock     bool                     // lock the job from running at same time from multiple instances
	tags     []string                 // allow the user to tag jobs with certain labels
}
// NewJob creates a Job that runs every interval units, with zeroed run
// times and the package-default location.
func NewJob(interval uint64) *Job {
	epoch := time.Unix(0, 0)
	return &Job{
		interval: interval,
		loc:      loc,
		lastRun:  epoch,
		nextRun:  epoch,
		startDay: time.Sunday,
		funcs:    map[string]interface{}{},
		fparams:  map[string][]interface{}{},
		tags:     []string{},
	}
}
// True if the job should be run now.
// The comparison is at whole-second granularity (Unix timestamps).
func (j *Job) shouldRun() bool {
	return time.Now().Unix() >= j.nextRun.Unix()
}
// run executes the job's registered function. When the job is configured
// with Lock(), the package Locker must be set and the lock must actually be
// acquired before the function is called.
func (j *Job) run() ([]reflect.Value, error) {
	if j.lock {
		if locker == nil {
			return nil, fmt.Errorf("trying to lock %s with nil locker", j.jobFunc)
		}
		key := getFunctionKey(j.jobFunc)
		// Previously Lock's (bool, error) result was discarded, so the job
		// ran even when the lock was unavailable; honor the locker's answer.
		ok, err := locker.Lock(key)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("failed to acquire lock for %s", j.jobFunc)
		}
		defer func() {
			if unlockErr := locker.Unlock(key); unlockErr != nil {
				log.Printf("failed to unlock %s: %v", j.jobFunc, unlockErr)
			}
		}()
	}
	result, err := callJobFuncWithParams(j.funcs[j.jobFunc], j.fparams[j.jobFunc])
	if err != nil {
		return nil, err
	}
	return result, nil
}
// Err should be checked to ensure an error didn't occur creating the job
// (e.g. an invalid At() time string).
func (j *Job) Err() error {
	return j.err
}
// Do specifies the jobFunc that should be called every time the job runs,
// and schedules the first run unless one is already pending.
func (j *Job) Do(jobFun interface{}, params ...interface{}) error {
	if j.err != nil {
		return j.err
	}
	if reflect.TypeOf(jobFun).Kind() != reflect.Func {
		return ErrNotAFunction
	}
	name := getFunctionName(jobFun)
	j.funcs[name] = jobFun
	j.fparams[name] = params
	j.jobFunc = name
	if now := time.Now().In(j.loc); !j.nextRun.After(now) {
		j.scheduleNextRun()
	}
	return nil
}
// DoSafely does the same thing as Do, but logs unexpected panics, instead of unwinding them up the chain
// Deprecated: DoSafely exists due to historical compatibility and will be removed soon. Use Do instead
func (j *Job) DoSafely(jobFun interface{}, params ...interface{}) error {
	wrapped := func() {
		defer func() {
			if r := recover(); r != nil {
				log.Printf("Internal panic occurred: %s", r)
			}
		}()
		_, _ = callJobFuncWithParams(jobFun, params)
	}
	return j.Do(wrapped)
}
// At schedules job at specific time of day
// s.Every(1).Day().At("10:30:01").Do(task)
// s.Every(1).Monday().At("10:30:01").Do(task)
func (j *Job) At(t string) *Job {
	hour, min, sec, parseErr := formatTime(t)
	if parseErr != nil {
		j.err = ErrTimeFormat
		return j
	}
	// store the wall-clock target as an offset from midnight
	j.atTime = time.Duration(hour)*time.Hour +
		time.Duration(min)*time.Minute +
		time.Duration(sec)*time.Second
	return j
}
// GetAt returns the specific time of day the job will run at, zero-padded
// to the "HH:MM" form accepted by At, e.g.
// s.Every(1).Day().At("10:30").GetAt() == "10:30"
func (j *Job) GetAt() string {
	// %02d keeps single-digit hours/minutes zero-padded ("09:05", not "9:5"
	// as the previous "%d:%d" produced).
	return fmt.Sprintf("%02d:%02d", j.atTime/time.Hour, (j.atTime%time.Hour)/time.Minute)
}
// Loc sets the location for which to interpret "At"
// s.Every(1).Day().At("10:30").Loc(time.UTC).Do(task)
func (j *Job) Loc(loc *time.Location) *Job {
	// the parameter shadows the package-level loc; only this job is affected
	j.loc = loc
	return j
}
// Tag allows you to add labels to a job;
// they don't impact the functionality of the job.
func (j *Job) Tag(t string, others ...string) {
	// append the variadic tail directly instead of looping element by element
	j.tags = append(j.tags, t)
	j.tags = append(j.tags, others...)
}
// Untag removes every occurrence of tag t from the job.
func (j *Job) Untag(t string) {
	kept := make([]string, 0, len(j.tags))
	for _, tag := range j.tags {
		if tag != t {
			kept = append(kept, tag)
		}
	}
	j.tags = kept
}
// Tags returns the tags attached to the job
// (the underlying slice is shared, not copied).
func (j *Job) Tags() []string {
	return j.tags
}
// periodDuration translates the job's interval and unit into a time.Duration.
// It returns ErrPeriodNotSpecified when no unit has been configured.
func (j *Job) periodDuration() (time.Duration, error) {
    n := time.Duration(j.interval)
    switch j.unit {
    case seconds:
        return n * time.Second, nil
    case minutes:
        return n * time.Minute, nil
    case hours:
        return n * time.Hour, nil
    case days:
        return n * 24 * time.Hour, nil
    case weeks:
        return n * 7 * 24 * time.Hour, nil
    }
    return 0, ErrPeriodNotSpecified
}
// roundToMidnight truncates t to midnight of the same day in the job's location.
func (j *Job) roundToMidnight(t time.Time) time.Time {
    return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, j.loc)
}
// scheduleNextRun computes the instant when this job should run next and
// stores it in j.nextRun. It returns an error only when no unit has been set.
func (j *Job) scheduleNextRun() error {
    now := time.Now()
    // lastRun equal to the epoch presumably means the job has never run —
    // confirm how NewJob initializes it. In that case use "now" as reference.
    // NOTE(review): comparing time.Time with == compares struct fields
    // (wall/monotonic), not the instant; time.Time.Equal is the canonical form.
    if j.lastRun == time.Unix(0, 0) {
        j.lastRun = now
    }
    periodDuration, err := j.periodDuration()
    if err != nil {
        return err
    }
    switch j.unit {
    case seconds, minutes, hours:
        j.nextRun = j.lastRun.Add(periodDuration)
    case days:
        // Anchor on midnight of the last run, then add the configured
        // time-of-day offset (atTime).
        j.nextRun = j.roundToMidnight(j.lastRun)
        j.nextRun = j.nextRun.Add(j.atTime)
    case weeks:
        j.nextRun = j.roundToMidnight(j.lastRun)
        // Shift to the configured weekday; this may land in the past,
        // which the catch-up loop below corrects.
        dayDiff := int(j.startDay)
        dayDiff -= int(j.nextRun.Weekday())
        if dayDiff != 0 {
            j.nextRun = j.nextRun.Add(time.Duration(dayDiff) * 24 * time.Hour)
        }
        j.nextRun = j.nextRun.Add(j.atTime)
    }
    // advance to next possible schedule: keep adding whole periods until the
    // candidate lies after both "now" and the last run.
    for j.nextRun.Before(now) || j.nextRun.Before(j.lastRun) {
        j.nextRun = j.nextRun.Add(periodDuration)
    }
    return nil
}
// NextScheduledTime returns the time of when this job is to run next.
func (j *Job) NextScheduledTime() time.Time {
    return j.nextRun
}

// mustInterval returns an error unless the job's interval equals i. It backs
// the singular helpers (Second, Minute, ...) which require interval 1.
// NOTE(review): those callers discard the returned error.
func (j *Job) mustInterval(i uint64) error {
    if j.interval != i {
        return fmt.Errorf("interval must be %d", i)
    }
    return nil
}
// From schedules the next run of the job at the given time.
func (j *Job) From(t *time.Time) *Job {
    j.nextRun = *t
    return j
}

// setUnit sets the job's time unit and returns the job for chaining.
func (j *Job) setUnit(unit timeUnit) *Job {
    j.unit = unit
    return j
}
// Seconds sets the unit to seconds.
func (j *Job) Seconds() *Job {
    return j.setUnit(seconds)
}

// Minutes sets the unit to minutes.
func (j *Job) Minutes() *Job {
    return j.setUnit(minutes)
}

// Hours sets the unit to hours.
func (j *Job) Hours() *Job {
    return j.setUnit(hours)
}

// Days sets the job's unit to days.
func (j *Job) Days() *Job {
    return j.setUnit(days)
}

// Weeks sets the unit to weeks.
func (j *Job) Weeks() *Job {
    return j.setUnit(weeks)
}

// Second sets the unit to seconds; the interval must be 1.
// NOTE(review): the mustInterval error is discarded in all five singular helpers.
func (j *Job) Second() *Job {
    j.mustInterval(1)
    return j.Seconds()
}

// Minute sets the unit to minutes; the interval must be 1.
func (j *Job) Minute() *Job {
    j.mustInterval(1)
    return j.Minutes()
}

// Hour sets the unit to hours; the interval must be 1.
func (j *Job) Hour() *Job {
    j.mustInterval(1)
    return j.Hours()
}

// Day sets the job's unit to days; the interval must be 1.
func (j *Job) Day() *Job {
    j.mustInterval(1)
    return j.Days()
}

// Week sets the job's unit to weeks; the interval must be 1.
func (j *Job) Week() *Job {
    j.mustInterval(1)
    return j.Weeks()
}

// Weekday starts the job on a specific weekday (implies weekly unit).
func (j *Job) Weekday(startDay time.Weekday) *Job {
    j.mustInterval(1)
    j.startDay = startDay
    return j.Weeks()
}

// GetWeekday returns which day of the week the job will run on.
// This should only be used when .Weekday(...) was called on the job.
func (j *Job) GetWeekday() time.Weekday {
    return j.startDay
}
// Monday sets the start day to Monday.
// - s.Every(1).Monday().Do(task)
func (j *Job) Monday() (job *Job) {
    return j.Weekday(time.Monday)
}

// Tuesday sets the job start day to Tuesday.
func (j *Job) Tuesday() *Job {
    return j.Weekday(time.Tuesday)
}

// Wednesday sets the job start day to Wednesday.
func (j *Job) Wednesday() *Job {
    return j.Weekday(time.Wednesday)
}

// Thursday sets the job start day to Thursday.
func (j *Job) Thursday() *Job {
    return j.Weekday(time.Thursday)
}

// Friday sets the job start day to Friday.
func (j *Job) Friday() *Job {
    return j.Weekday(time.Friday)
}

// Saturday sets the job start day to Saturday.
func (j *Job) Saturday() *Job {
    return j.Weekday(time.Saturday)
}

// Sunday sets the job start day to Sunday.
func (j *Job) Sunday() *Job {
    return j.Weekday(time.Sunday)
}

// Lock prevents the job from running in multiple instances of gocron.
func (j *Job) Lock() *Job {
    j.lock = true
    return j
}

@ -1,253 +0,0 @@
package gocron
import (
"sort"
"time"
)
// Scheduler struct, the only data member is the list of jobs.
// - implements the sort.Interface{} for sorting jobs, by the time nextRun
type Scheduler struct {
    jobs [MAXJOBNUM]*Job // Fixed-capacity backing array; entries past size are nil
    size int             // Number of live jobs at the front of jobs
    loc  *time.Location  // Location to use when scheduling jobs with specified times
}

// defaultScheduler backs the package-level convenience functions
// (Every, RunPending, Start, ...).
var (
    defaultScheduler = NewScheduler()
)

// NewScheduler creates a new, empty scheduler using the package default location.
func NewScheduler() *Scheduler {
    return &Scheduler{
        jobs: [MAXJOBNUM]*Job{},
        size: 0,
        loc:  loc,
    }
}
// Jobs returns the list of Jobs from the Scheduler.
func (s *Scheduler) Jobs() []*Job {
    // Only the first size entries are live; the rest of the array is nil.
    return s.jobs[:s.size]
}

// Len reports the number of scheduled jobs (sort.Interface).
func (s *Scheduler) Len() int {
    return s.size
}

// Swap exchanges jobs i and j (sort.Interface).
func (s *Scheduler) Swap(i, j int) {
    s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i]
}
// Less reports whether job i is scheduled strictly before job j (sort.Interface).
// sort requires a strict weak ordering; the previous ">=" comparison answered
// true in both directions for equal times, violating that contract.
func (s *Scheduler) Less(i, j int) bool {
    return s.jobs[i].nextRun.Before(s.jobs[j].nextRun)
}
// ChangeLoc changes the default time location for this scheduler.
func (s *Scheduler) ChangeLoc(newLocation *time.Location) {
    s.loc = newLocation
}

// getRunnableJobs returns the jobs that are due to run now, plus their count n.
func (s *Scheduler) getRunnableJobs() (runningJobs [MAXJOBNUM]*Job, n int) {
    runnableJobs := [MAXJOBNUM]*Job{}
    n = 0
    // Sort by nextRun so every due job sits at the front...
    sort.Sort(s)
    for i := 0; i < s.size; i++ {
        if s.jobs[i].shouldRun() {
            runnableJobs[n] = s.jobs[i]
            n++
        } else {
            // ...which lets the scan stop at the first not-yet-due job.
            break
        }
    }
    return runnableJobs, n
}
// NextRun returns the job that will run next and the datetime it should run.
func (s *Scheduler) NextRun() (*Job, time.Time) {
    if s.size <= 0 {
        // No jobs scheduled: return the current time as a placeholder.
        return nil, time.Now()
    }
    sort.Sort(s)
    return s.jobs[0], s.jobs[0].nextRun
}

// Every schedules a new periodic job with the given interval.
// NOTE(review): there is no capacity check — scheduling more than MAXJOBNUM
// jobs panics on the array write below.
func (s *Scheduler) Every(interval uint64) *Job {
    job := NewJob(interval).Loc(s.loc)
    s.jobs[s.size] = job
    s.size++
    return job
}
// RunPending launches every job that is due, then records its run time and
// computes its next occurrence.
func (s *Scheduler) RunPending() {
    jobs, count := s.getRunnableJobs()
    for idx := 0; idx < count; idx++ {
        go jobs[idx].run()
        jobs[idx].lastRun = time.Now()
        jobs[idx].scheduleNextRun()
    }
}
// RunAll runs all jobs immediately, regardless of whether they are due.
func (s *Scheduler) RunAll() {
    s.RunAllwithDelay(0)
}
// RunAllwithDelay runs all jobs, sleeping d seconds between consecutive starts.
func (s *Scheduler) RunAllwithDelay(d int) {
    for i := 0; i < s.size; i++ {
        go s.jobs[i].run()
        if d != 0 {
            // d is documented as seconds; the previous bare time.Duration(d)
            // slept d *nanoseconds*, making the delay effectively zero.
            time.Sleep(time.Duration(d) * time.Second)
        }
    }
}
// Remove deletes every job registered with the same function as j.
func (s *Scheduler) Remove(j interface{}) {
    // Match on the recorded function name rather than pointer identity.
    s.removeByCondition(func(someJob *Job) bool {
        return someJob.jobFunc == getFunctionName(j)
    })
}

// RemoveByRef removes the specific job j by reference.
func (s *Scheduler) RemoveByRef(j *Job) {
    s.removeByCondition(func(someJob *Job) bool {
        return someJob == j
    })
}
// removeByCondition deletes every job for which shouldRemove returns true,
// compacting the backing array and shrinking s.size accordingly.
func (s *Scheduler) removeByCondition(shouldRemove func(*Job) bool) {
    i := 0
    // keep deleting until no more jobs match the criteria
    for {
        found := false
        // Resume scanning from i: everything before it has already been kept.
        for ; i < s.size; i++ {
            if shouldRemove(s.jobs[i]) {
                found = true
                break
            }
        }
        if !found {
            return
        }
        // Shift the remaining jobs one slot left over the removed entry.
        for j := (i + 1); j < s.size; j++ {
            s.jobs[i] = s.jobs[j]
            i++
        }
        s.size--
        // Clear the now-unused tail slot so the pointer can be collected.
        s.jobs[s.size] = nil
    }
}
// Scheduled checks if a job with the same function as j was already added.
func (s *Scheduler) Scheduled(j interface{}) bool {
    name := getFunctionName(j)
    // Inspect only the first s.size entries: the remainder of the fixed-size
    // backing array holds nil *Job pointers, and the previous full-array range
    // dereferenced them (job.jobFunc), panicking on any non-full scheduler.
    for _, job := range s.jobs[:s.size] {
        if job.jobFunc == name {
            return true
        }
    }
    return false
}
// Clear deletes all scheduled jobs, releasing the stored pointers.
func (s *Scheduler) Clear() {
    for i := range s.jobs[:s.size] {
        s.jobs[i] = nil
    }
    s.size = 0
}
// Start launches a background goroutine that fires RunPending once per second.
// Sending on (or closing) the returned channel stops the loop and its ticker.
func (s *Scheduler) Start() chan bool {
    stopped := make(chan bool, 1)
    ticker := time.NewTicker(1 * time.Second)
    go func() {
        for {
            select {
            case <-ticker.C:
                s.RunPending()
            case <-stopped:
                ticker.Stop()
                return
            }
        }
    }()
    return stopped
}
// The following methods are shortcuts for not having to
// create a Scheduler instance; they all delegate to defaultScheduler.

// Every schedules a new periodic job running in specific interval.
func Every(interval uint64) *Job {
    return defaultScheduler.Every(interval)
}

// RunPending runs all jobs that are scheduled to run.
//
// Please note that it is *intended behavior that run_pending()
// does not run missed jobs*. For example, if you've registered a job
// that should run every minute and you only call run_pending()
// in one hour increments then your job won't be run 60 times in
// between but only once.
func RunPending() {
    defaultScheduler.RunPending()
}

// RunAll runs all jobs regardless if they are scheduled to run or not.
func RunAll() {
    defaultScheduler.RunAll()
}

// RunAllwithDelay runs all the jobs with a delay in seconds.
//
// A delay of `delay` seconds is added between each job. This can help
// to distribute the system load generated by the jobs more evenly over
// time.
func RunAllwithDelay(d int) {
    defaultScheduler.RunAllwithDelay(d)
}

// Start runs all jobs that are scheduled to run on the default scheduler.
func Start() chan bool {
    return defaultScheduler.Start()
}

// Clear deletes all scheduled jobs from the default scheduler.
func Clear() {
    defaultScheduler.Clear()
}

// Remove removes a specific job from the default scheduler.
func Remove(j interface{}) {
    defaultScheduler.Remove(j)
}
// Scheduled checks if specific job j was already added to the default scheduler.
// Delegating to the method avoids duplicating its loop; the previous inline copy
// ranged over the whole fixed-size jobs array and dereferenced nil *Job entries.
func Scheduled(j interface{}) bool {
    return defaultScheduler.Scheduled(j)
}
// NextRun returns the job that will run next on the default scheduler and
// the time it is scheduled for. The previous named result "time time.Time"
// shadowed the time package inside the function body.
func NextRun() (*Job, time.Time) {
    return defaultScheduler.NextRun()
}

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Nil Org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -1,112 +0,0 @@
package convert
import (
"bytes"
"encoding/binary"
"fmt"
"strconv"
)
// ToString converts an arbitrary value to its string form; nil yields "".
func ToString(value interface{}) string {
    if value == nil {
        return ""
    }
    return fmt.Sprint(value)
}

// ToBool converts an arbitrary value to bool. Booleans pass through, a
// non-zero int is true, and strings go through strconv.ParseBool; anything
// else (including parse failures and nil) is false.
func ToBool(i interface{}) bool {
    switch b := i.(type) {
    case bool:
        return b
    case nil:
        return false
    case int:
        // Use the typed switch variable instead of re-asserting i.(int).
        return b != 0
    case string:
        // b is already the string value; no need to round-trip via ToString.
        v, err := strconv.ParseBool(b)
        if err != nil {
            return false
        }
        return v
    default:
        return false
    }
}
// ToInt converts a value to int.
func ToInt(value interface{}) int {
    return int(ToInt64(value))
}

// ToInt32 converts a value to int32.
func ToInt32(value interface{}) int32 {
    return int32(ToInt64(value))
}

// ToInt64 converts a value to int64; unparseable input yields 0.
func ToInt64(value interface{}) int64 {
    if n, err := strconv.ParseInt(ToString(value), 10, 64); err == nil {
        return n
    }
    return 0
}
// ToUint converts a value to uint.
func ToUint(value interface{}) uint {
    return uint(ToUint64(value))
}

// ToUint32 converts a value to uint32.
func ToUint32(value interface{}) uint32 {
    return uint32(ToUint64(value))
}

// ToUint64 converts a value to uint64; unparseable input yields 0.
func ToUint64(value interface{}) uint64 {
    if n, err := strconv.ParseUint(ToString(value), 10, 64); err == nil {
        return n
    }
    return 0
}
// ToFloat32 converts a value to float32.
func ToFloat32(value interface{}) float32 {
    return float32(ToFloat64(value))
}

// ToFloat64 converts a value to float64; unparseable input yields 0.
func ToFloat64(value interface{}) float64 {
    if f, err := strconv.ParseFloat(ToString(value), 64); err == nil {
        return f
    }
    return 0
}
// BytesToInt32 decodes the first four bytes of data as a big-endian int32.
// Input shorter than four bytes yields 0.
func BytesToInt32(data []byte) int32 {
    var num int32
    buffer := bytes.NewBuffer(data)
    // The previous version discarded binary.Read's error; make the
    // short-input case explicit instead of relying on num staying zero.
    if err := binary.Read(buffer, binary.BigEndian, &num); err != nil {
        return 0
    }
    return num
}
// BytesToInt decodes big-endian bytes to int via BytesToInt32.
func BytesToInt(data []byte) int {
    return int(BytesToInt32(data))
}
// BytesToInt64 decodes the first eight bytes of data as a big-endian int64.
// Input shorter than eight bytes yields 0.
func BytesToInt64(data []byte) int64 {
    var num int64
    buffer := bytes.NewBuffer(data)
    // The previous version discarded binary.Read's error; make the
    // short-input case explicit instead of relying on num staying zero.
    if err := binary.Read(buffer, binary.BigEndian, &num); err != nil {
        return 0
    }
    return num
}

@ -1,9 +0,0 @@
This is a list of people who have contributed code to go-cache. They, or their
employers, are the copyright holders of the contributed code. Contributed code
is subject to the license restrictions listed in LICENSE (as they were when the
code was contributed.)
Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Sergey Shepelev <temotor@gmail.com>
Alex Edwards <ajmedwards@gmail.com>

@ -1,19 +0,0 @@
Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -1,83 +0,0 @@
# go-cache
go-cache is an in-memory key:value store/cache similar to memcached that is
suitable for applications running on a single machine. Its major advantage is
that, being essentially a thread-safe `map[string]interface{}` with expiration
times, it doesn't need to serialize or transmit its contents over the network.
Any object can be stored, for a given duration or forever, and the cache can be
safely used by multiple goroutines.
Although go-cache isn't meant to be used as a persistent datastore, the entire
cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
### Installation
`go get github.com/patrickmn/go-cache`
### Usage
```go
import (
"fmt"
"github.com/patrickmn/go-cache"
"time"
)
func main() {
// Create a cache with a default expiration time of 5 minutes, and which
// purges expired items every 10 minutes
c := cache.New(5*time.Minute, 10*time.Minute)
// Set the value of the key "foo" to "bar", with the default expiration time
c.Set("foo", "bar", cache.DefaultExpiration)
// Set the value of the key "baz" to 42, with no expiration time
// (the item won't be removed until it is re-set, or removed using
// c.Delete("baz")
c.Set("baz", 42, cache.NoExpiration)
// Get the string associated with the key "foo" from the cache
foo, found := c.Get("foo")
if found {
fmt.Println(foo)
}
// Since Go is statically typed, and cache values can be anything, type
// assertion is needed when values are being passed to functions that don't
// take arbitrary types, (i.e. interface{}). The simplest way to do this for
// values which will only be used once--e.g. for passing to another
// function--is:
foo, found := c.Get("foo")
if found {
MyFunction(foo.(string))
}
// This gets tedious if the value is used several times in the same function.
// You might do either of the following instead:
if x, found := c.Get("foo"); found {
foo := x.(string)
// ...
}
// or
var foo string
if x, found := c.Get("foo"); found {
foo = x.(string)
}
// ...
// foo can then be passed around freely as a string
// Want performance? Store pointers!
c.Set("foo", &MyStruct, cache.DefaultExpiration)
if x, found := c.Get("foo"); found {
foo := x.(*MyStruct)
// ...
}
}
```
### Reference
`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)

File diff suppressed because it is too large Load Diff

@ -1,192 +0,0 @@
package cache
import (
"crypto/rand"
"math"
"math/big"
insecurerand "math/rand"
"os"
"runtime"
"time"
)
// This is an experimental and unexported (for now) attempt at making a cache
// with better algorithmic complexity than the standard one, namely by
// preventing write locks of the entire cache when an item is added. As of the
// time of writing, the overhead of selecting buckets results in cache
// operations being about twice as slow as for the standard cache with small
// total cache sizes, and faster for larger ones.
//
// See cache_test.go for a few benchmarks.
// unexportedShardedCache wraps shardedCache so a runtime finalizer can be
// attached to the wrapper (see unexportedNewSharded).
type unexportedShardedCache struct {
    *shardedCache
}

// shardedCache spreads keys across m shards (cs), chosen by a seeded djb33
// hash, so each write locks only one shard instead of the whole cache.
type shardedCache struct {
    seed    uint32          // hash seed, randomized per cache
    m       uint32          // number of shards
    cs      []*cache        // the shards
    janitor *shardedJanitor // optional background purger
}
// djb33 is djb2 with better shuffling. 5x faster than FNV with the hash.Hash
// overhead. It hashes k with the given seed down to 32 bits.
func djb33(seed uint32, k string) uint32 {
    var (
        l = uint32(len(k))
        d = 5381 + seed + l
        i = uint32(0)
    )
    // Why is all this 5x faster than a for loop?
    if l >= 4 {
        for i < l-4 {
            d = (d * 33) ^ uint32(k[i])
            d = (d * 33) ^ uint32(k[i+1])
            d = (d * 33) ^ uint32(k[i+2])
            d = (d * 33) ^ uint32(k[i+3])
            i += 4
        }
    }
    // Mix in the remaining tail bytes.
    // NOTE(review): Go switch cases do not fall through, so "case 1" mixes in
    // nothing and each arm processes one byte fewer than its label suggests.
    // This matches upstream go-cache; "fixing" it would change every bucket
    // assignment, so confirm before touching.
    switch l - i {
    case 1:
    case 2:
        d = (d * 33) ^ uint32(k[i])
    case 3:
        d = (d * 33) ^ uint32(k[i])
        d = (d * 33) ^ uint32(k[i+1])
    case 4:
        d = (d * 33) ^ uint32(k[i])
        d = (d * 33) ^ uint32(k[i+1])
        d = (d * 33) ^ uint32(k[i+2])
    }
    return d ^ (d >> 16)
}
// bucket selects the shard responsible for key k.
func (sc *shardedCache) bucket(k string) *cache {
    return sc.cs[djb33(sc.seed, k)%sc.m]
}

// Set stores k=x in the owning shard with lifetime d.
func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
    sc.bucket(k).Set(k, x, d)
}

// Add stores k=x only if k is absent from its shard.
func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
    return sc.bucket(k).Add(k, x, d)
}

// Replace stores k=x only if k already exists in its shard.
func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
    return sc.bucket(k).Replace(k, x, d)
}

// Get looks up k in its shard.
func (sc *shardedCache) Get(k string) (interface{}, bool) {
    return sc.bucket(k).Get(k)
}

// Increment adds n to the numeric value stored at k.
func (sc *shardedCache) Increment(k string, n int64) error {
    return sc.bucket(k).Increment(k, n)
}

// IncrementFloat adds n to the float value stored at k.
func (sc *shardedCache) IncrementFloat(k string, n float64) error {
    return sc.bucket(k).IncrementFloat(k, n)
}

// Decrement subtracts n from the numeric value stored at k.
func (sc *shardedCache) Decrement(k string, n int64) error {
    return sc.bucket(k).Decrement(k, n)
}

// Delete removes k from its shard.
func (sc *shardedCache) Delete(k string) {
    sc.bucket(k).Delete(k)
}

// DeleteExpired purges expired items from every shard.
func (sc *shardedCache) DeleteExpired() {
    for _, v := range sc.cs {
        v.DeleteExpired()
    }
}

// Returns the items in the cache. This may include items that have expired,
// but have not yet been cleaned up. If this is significant, the Expiration
// fields of the items should be checked. Note that explicit synchronization
// is needed to use a cache and its corresponding Items() return values at
// the same time, as the maps are shared.
func (sc *shardedCache) Items() []map[string]Item {
    res := make([]map[string]Item, len(sc.cs))
    for i, v := range sc.cs {
        res[i] = v.Items()
    }
    return res
}

// Flush empties every shard.
func (sc *shardedCache) Flush() {
    for _, v := range sc.cs {
        v.Flush()
    }
}
// shardedJanitor periodically purges expired entries from a shardedCache.
type shardedJanitor struct {
    Interval time.Duration // how often to purge
    stop     chan bool     // signals Run to exit
}

// Run loops forever, deleting expired items every Interval, until a value
// arrives on j.stop.
// NOTE(review): time.Tick provides no handle to stop its underlying ticker,
// so it persists after Run returns (matches upstream go-cache).
func (j *shardedJanitor) Run(sc *shardedCache) {
    j.stop = make(chan bool)
    tick := time.Tick(j.Interval)
    for {
        select {
        case <-tick:
            sc.DeleteExpired()
        case <-j.stop:
            return
        }
    }
}

// stopShardedJanitor signals the janitor goroutine to exit; installed as a
// finalizer on the unexported wrapper.
func stopShardedJanitor(sc *unexportedShardedCache) {
    sc.janitor.stop <- true
}

// runShardedJanitor starts a janitor goroutine that purges sc every ci.
func runShardedJanitor(sc *shardedCache, ci time.Duration) {
    j := &shardedJanitor{
        Interval: ci,
    }
    sc.janitor = j
    go j.Run(sc)
}
// newShardedCache builds a shardedCache with n shards, each an independent
// cache with default expiration de. The hash seed comes from the system
// CSPRNG, falling back to math/rand (with a warning) on failure.
func newShardedCache(n int, de time.Duration) *shardedCache {
    max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
    rnd, err := rand.Int(rand.Reader, max)
    var seed uint32
    if err != nil {
        os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
        seed = insecurerand.Uint32()
    } else {
        seed = uint32(rnd.Uint64())
    }
    sc := &shardedCache{
        seed: seed,
        m:    uint32(n),
        cs:   make([]*cache, n),
    }
    for i := 0; i < n; i++ {
        c := &cache{
            defaultExpiration: de,
            items:             map[string]Item{},
        }
        sc.cs[i] = c
    }
    return sc
}

// unexportedNewSharded wraps a fresh shardedCache so a runtime finalizer can
// stop the janitor goroutine once the caller drops the wrapper.
func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
    // A zero default expiration is normalized to -1 — presumably the
    // package's NoExpiration sentinel; confirm against the exported constructor.
    if defaultExpiration == 0 {
        defaultExpiration = -1
    }
    sc := newShardedCache(shards, defaultExpiration)
    SC := &unexportedShardedCache{sc}
    if cleanupInterval > 0 {
        runShardedJanitor(sc, cleanupInterval)
        runtime.SetFinalizer(SC, stopShardedJanitor)
    }
    return SC
}

@ -7,6 +7,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog).
## [v0.7.40](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.39...v0.7.40) - 2022-11-15
1、upload file with contentlength、update crc log, add optional header
2、支持启用工作流和暂停工作流接口
### Commits
- add optional header [`c996f66`](https://github.com/tencentyun/cos-go-sdk-v5/commit/c996f66c8d02a04fc4de4e494872f74878238e31)
- 支持启用工作流和暂停工作流接口 [`a187403`](https://github.com/tencentyun/cos-go-sdk-v5/commit/a187403769e561d54f492d8324775dfa1ebd47ff)
- Updated CHANGELOG.md [`2631285`](https://github.com/tencentyun/cos-go-sdk-v5/commit/26312858daba267fedbfd0e4e19713138c2d9cb6)
- update crc [`3ee574e`](https://github.com/tencentyun/cos-go-sdk-v5/commit/3ee574e09c844561dd4152e9b2b95ea19711f945)
- upload file with contentlength [`86fec59`](https://github.com/tencentyun/cos-go-sdk-v5/commit/86fec599e274e4d49025d0e2e3d42d1de1334c4d)
## [v0.7.39](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.38...v0.7.39) - 2022-10-12
1、update multicopy retry、Delete Bucket option
@ -564,8 +577,6 @@ add delete with versionid & fix MultiUpload when filesize equal 0 & Bucket/Objec
## [v0.7.9](https://github.com/tencentyun/cos-go-sdk-v5/compare/v0.7.8...v0.7.9) - 2020-09-14
add bucket intelligenttiering
### Merged
- add bucket intelligenttiering [`#85`](https://github.com/tencentyun/cos-go-sdk-v5/pull/85)

@ -183,7 +183,7 @@ type ListVersionsResultVersion struct {
IsLatest bool `xml:"IsLatest,omitempty"`
LastModified string `xml:"LastModified,omitempty"`
ETag string `xml:"ETag,omitempty"`
Size int `xml:"Size,omitempty"`
Size int64 `xml:"Size,omitempty"`
StorageClass string `xml:"StorageClass,omitempty"`
Owner *Owner `xml:"Owner,omitempty"`
}

@ -215,6 +215,17 @@ type UserExtraInfo struct {
Role string `xml:",omitempty"`
}
// FreezeConf is auto freeze options
type FreezeConf struct {
PornScore string `xml:",omitempty"`
IllegalScore string `xml:",omitempty"`
TerrorismScore string `xml:",omitempty"`
PoliticsScore string `xml:",omitempty"`
AdsScore string `xml:",omitempty"`
AbuseScore string `xml:",omitempty"`
TeenagerScore string `xml:",omitempty"`
}
// ImageAuditingInputOptions is the option of BatchImageAuditingOptions
type ImageAuditingInputOptions struct {
DataId string `xml:",omitempty"`
@ -229,10 +240,11 @@ type ImageAuditingInputOptions struct {
// ImageAuditingJobConf is the config of BatchImageAuditingOptions
type ImageAuditingJobConf struct {
DetectType string `xml:",omitempty"`
BizType string `xml:",omitempty"`
Async int `xml:",omitempty"`
Callback string `xml:",omitempty"`
DetectType string `xml:",omitempty"`
BizType string `xml:",omitempty"`
Async int `xml:",omitempty"`
Callback string `xml:",omitempty"`
Freeze *FreezeConf `xml:",omitempty"`
}
// BatchImageAuditingOptions is the option of BatchImageAuditing
@ -265,6 +277,7 @@ type ImageAuditingResult struct {
CompressionResult int `xml:",omitempty"`
UserInfo *UserExtraInfo `xml:",omitempty"`
ListInfo *UserListInfo `xml:",omitempty"`
ForbidState int `xml:",omitempty"`
}
// BatchImageAuditingJobResult is the result of BatchImageAuditing
@ -317,6 +330,7 @@ type PutVideoAuditingJobOptions struct {
InputUserInfo *UserExtraInfo `xml:"Input>UserInfo,omitempty"`
Conf *VideoAuditingJobConf `xml:"Conf"`
Type string `xml:"Type,omitempty"`
StorageConf *StorageConf `xml:"StorageConf,omitempty"`
}
// VideoAuditingJobConf is the config of PutVideoAuditingJobOptions
@ -328,6 +342,7 @@ type VideoAuditingJobConf struct {
CallbackType int `xml:",omitempty"`
BizType string `xml:",omitempty"`
DetectContent int `xml:",omitempty"`
Freeze *FreezeConf `xml:",omitempty"`
}
// PutVideoAuditingJobSnapshot is the snapshot config of VideoAuditingJobConf
@ -337,6 +352,11 @@ type PutVideoAuditingJobSnapshot struct {
TimeInterval float32 `xml:",omitempty"`
}
// StorageConf is live video storage config of PutVideoAuditingJobOptions
type StorageConf struct {
Path string `xml:",omitempty"`
}
// PutVideoAuditingJobResult is the result of PutVideoAuditingJob
type PutVideoAuditingJobResult struct {
XMLName xml.Name `xml:"Response"`
@ -394,6 +414,7 @@ type AuditingJobDetail struct {
UserInfo *UserExtraInfo `xml:",omitempty"`
Type string `xml:",omitempty"`
ListInfo *UserListInfo `xml:",omitempty"`
ForbidState int `xml:",omitempty"`
}
// GetVideoAuditingJobSnapshot is the snapshot result of AuditingJobDetail
@ -417,6 +438,7 @@ type AudioSectionResult struct {
OffsetTime int `xml:",omitempty"`
Duration int `xml:",omitempty"`
Label string `xml:",omitempty"`
SubLabel string `xml:",omitempty"`
Result int `xml:",omitempty"`
PornInfo *RecognitionInfo `xml:",omitempty"`
TerrorismInfo *RecognitionInfo `xml:",omitempty"`
@ -464,11 +486,12 @@ type PutAudioAuditingJobOptions struct {
// AudioAuditingJobConf is the config of PutAudioAuditingJobOptions
type AudioAuditingJobConf struct {
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
CallbackVersion string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
BizType string `xml:",omitempty"`
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
CallbackVersion string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
BizType string `xml:",omitempty"`
Freeze *FreezeConf `xml:",omitempty"`
}
// PutAudioAuditingJobResult is the result of PutAudioAuditingJob
@ -517,6 +540,7 @@ type AudioAuditingJobDetail struct {
Section []AudioSectionResult `xml:",omitempty"`
UserInfo *UserExtraInfo `xml:",omitempty"`
ListInfo *UserListInfo `xml:",omitempty"`
ForbidState int `xml:",omitempty"`
}
// LanguageResult 语种识别结果
@ -553,11 +577,12 @@ type PutTextAuditingJobOptions struct {
// TextAuditingJobConf is the config of PutAudioAuditingJobOptions
type TextAuditingJobConf struct {
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
CallbackVersion string `xml:",omitempty"`
BizType string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
CallbackVersion string `xml:",omitempty"`
BizType string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
Freeze *FreezeConf `xml:",omitempty"`
}
// PutTextAuditingJobResult is the result of PutTextAuditingJob
@ -607,6 +632,7 @@ type TextAuditingJobDetail struct {
Section []TextSectionResult `xml:",omitempty"`
UserInfo *UserExtraInfo `xml:",omitempty"`
ListInfo *UserListInfo `xml:",omitempty"`
ForbidState int `xml:",omitempty"`
}
// TextLibResult
@ -666,10 +692,11 @@ type PutDocumentAuditingJobOptions struct {
// DocumentAuditingJobConf is the config of PutDocumentAuditingJobOptions
type DocumentAuditingJobConf struct {
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
BizType string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
DetectType string `xml:",omitempty"`
Callback string `xml:",omitempty"`
BizType string `xml:",omitempty"`
CallbackType int `xml:",omitempty"`
Freeze *FreezeConf `xml:",omitempty"`
}
// PutDocumentAuditingJobResult is the result of PutDocumentAuditingJob
@ -713,6 +740,7 @@ type DocumentAuditingJobDetail struct {
PageSegment *DocumentPageSegmentInfo `xml:",omitempty"`
UserInfo *UserExtraInfo `xml:",omitempty"`
ListInfo *UserListInfo `xml:",omitempty"`
ForbidState int `xml:",omitempty"`
}
// DocumentResultInfo
@ -1882,6 +1910,11 @@ func (s *CIService) LivenessRecognitionWhenUpload(ctx context.Context, obj, file
return &res, resp, err
}
type GoodsMattingptions struct {
CenterLayout string `url:"center-layout,omitempty"`
PaddingLayout string `url:"padding-layout,omitempty"`
}
// GoodsMatting 商品抠图
func (s *CIService) GoodsMatting(ctx context.Context, key string) (*Response, error) {
sendOpt := sendOptions{
@ -1893,3 +1926,16 @@ func (s *CIService) GoodsMatting(ctx context.Context, key string) (*Response, er
resp, err := s.client.send(ctx, &sendOpt)
return resp, err
}
// GoodsMattingWithOpt performs goods matting (product image cutout) on the
// object at key, passing the layout options as query parameters.
func (s *CIService) GoodsMattingWithOpt(ctx context.Context, key string, opt *GoodsMattingptions) (*Response, error) {
    sendOpt := sendOptions{
        baseURL:          s.client.BaseURL.BucketURL,
        uri:              "/" + encodeURIComponent(key) + "?ci-process=GoodsMatting",
        optQuery:         opt,
        method:           http.MethodGet,
        disableCloseBody: true,
    }
    resp, err := s.client.send(ctx, &sendOpt)
    return resp, err
}

@ -0,0 +1,150 @@
package cos
import (
"context"
"encoding/xml"
"net/http"
)
// FileHashCodeConfig configures a hash-computation job.
type FileHashCodeConfig struct {
    Type        string `xml:",omitempty"`
    AddToHeader bool   `xml:",omitempty"`
}

// FileHashCodeResult carries the computed digests and file metadata.
type FileHashCodeResult struct {
    MD5          string `xml:",omitempty"`
    SHA1         string `xml:",omitempty"`
    SHA256       string `xml:",omitempty"`
    FileSize     int    `xml:",omitempty"`
    LastModified string `xml:",omitempty"`
    Etag         string `xml:",omitempty"`
}

// FileUncompressConfig configures a decompression job.
type FileUncompressConfig struct {
    Prefix         string `xml:",omitempty"`
    PrefixReplaced string `xml:",omitempty"`
}

// FileUncompressResult describes where the extracted files were written.
type FileUncompressResult struct {
    Region    string `xml:",omitempty"`
    Bucket    string `xml:",omitempty"`
    FileCount string `xml:",omitempty"`
}

// FileCompressConfig configures a multi-file compression (packaging) job.
type FileCompressConfig struct {
    Flatten string `xml:",omitempty"`
    Format  string `xml:",omitempty"`
    UrlList string `xml:",omitempty"`
    Prefix  string `xml:",omitempty"`
    Key     string `xml:",omitempty"`
}

// FileCompressResult identifies the produced archive object.
type FileCompressResult struct {
    Region string `xml:",omitempty"`
    Bucket string `xml:",omitempty"`
    Object string `xml:",omitempty"`
}

// FileProcessInput and FileProcessOutput share the region/bucket/object shape.
type FileProcessInput FileCompressResult
type FileProcessOutput FileCompressResult

// FileProcessJobOperation selects which operation a job performs; exactly one
// of the *Config fields is set by the caller, the *Result fields come back in
// responses.
type FileProcessJobOperation struct {
    FileHashCodeConfig   *FileHashCodeConfig   `xml:",omitempty"`
    FileHashCodeResult   *FileHashCodeResult   `xml:",omitempty"`
    FileUncompressConfig *FileUncompressConfig `xml:",omitempty"`
    FileUncompressResult *FileUncompressResult `xml:",omitempty"`
    FileCompressConfig   *FileCompressConfig   `xml:",omitempty"`
    FileCompressResult   *FileCompressResult   `xml:",omitempty"`
    Output               *FileProcessOutput    `xml:",omitempty"`
    UserData             string                `xml:",omitempty"`
}

// FileProcessJobOptions is the request body of CreateFileProcessJob.
type FileProcessJobOptions struct {
    XMLName          xml.Name                 `xml:"Request"`
    Tag              string                   `xml:",omitempty"`
    Input            *FileProcessInput        `xml:",omitempty"`
    Operation        *FileProcessJobOperation `xml:",omitempty"`
    QueueId          string                   `xml:",omitempty"`
    CallBackFormat   string                   `xml:",omitempty"`
    CallBackType     string                   `xml:",omitempty"`
    CallBack         string                   `xml:",omitempty"`
    CallBackMqConfig string                   `xml:",omitempty"`
}

// FileProcessJobResult is the response of the file-job APIs.
type FileProcessJobResult struct {
    XMLName    xml.Name               `xml:"Response"`
    JobsDetail *FileProcessJobsDetail `xml:",omitempty"`
}

// FileProcessJobsDetail is the per-job status record in a response.
type FileProcessJobsDetail struct {
    Code         string                   `xml:",omitempty"`
    Message      string                   `xml:",omitempty"`
    JobId        string                   `xml:",omitempty"`
    Tag          string                   `xml:",omitempty"`
    State        string                   `xml:",omitempty"`
    CreationTime string                   `xml:",omitempty"`
    StartTime    string                   `xml:",omitempty"`
    EndTime      string                   `xml:",omitempty"`
    QueueId      string                   `xml:",omitempty"`
    Input        *FileProcessInput        `xml:",omitempty"`
    Operation    *FileProcessJobOperation `xml:",omitempty"`
}
// 提交哈希值计算任务 https://cloud.tencent.com/document/product/436/83108
// 提交文件解压任务 https://cloud.tencent.com/document/product/436/83110
// 提交多文件打包压缩任务 https://cloud.tencent.com/document/product/436/83112
// CreateFileProcessJob submits a file-process job (hash / uncompress /
// compress, selected by opt.Tag and opt.Operation) to the CI endpoint via
// POST /file_jobs, and decodes the <Response> into a FileProcessJobResult.
func (s *CIService) CreateFileProcessJob(ctx context.Context, opt *FileProcessJobOptions) (*FileProcessJobResult, *Response, error) {
	result := &FileProcessJobResult{}
	sopt := &sendOptions{
		baseURL: s.client.BaseURL.CIURL,
		uri:     "/file_jobs",
		method:  http.MethodPost,
		body:    opt,
		result:  result,
	}
	resp, err := s.client.send(ctx, sopt)
	return result, resp, err
}
// 查询哈希值计算结果 https://cloud.tencent.com/document/product/436/83109
// 查询文件解压结果 https://cloud.tencent.com/document/product/436/83111
// 查询多文件打包压缩结果 https://cloud.tencent.com/document/product/436/83113
// DescribeFileProcessJob fetches the current state of a previously
// submitted file-process job via GET /file_jobs/{jobid}.
func (s *CIService) DescribeFileProcessJob(ctx context.Context, jobid string) (*FileProcessJobResult, *Response, error) {
	result := &FileProcessJobResult{}
	resp, err := s.client.send(ctx, &sendOptions{
		baseURL: s.client.BaseURL.CIURL,
		uri:     "/file_jobs/" + jobid,
		method:  http.MethodGet,
		result:  result,
	})
	return result, resp, err
}
// GetFileHashOptions is the option of GetFileHash, encoded into the query
// string of the object GET request.
type GetFileHashOptions struct {
	CIProcess   string `url:"ci-process,omitempty"`
	Type        string `url:"type,omitempty"` // hash algorithm name — see the API doc linked at GetFileHash
	AddToHeader bool   `url:"addtoheader,omitempty"`
}

// GetFileHashResult is the result of GetFileHash: the computed hash plus
// the input object location, unwrapped from the <Response> envelope.
type GetFileHashResult struct {
	XMLName            xml.Name            `xml:"Response"`
	FileHashCodeResult *FileHashCodeResult `xml:",omitempty"`
	Input              *FileProcessInput   `xml:",omitempty"`
}
// 哈希值计算同步请求 https://cloud.tencent.com/document/product/436/83107
// GetFileHash computes a hash of the object named name synchronously
// (no job queue): a GET on the object URL with the ci-process query
// options from opt.
func (s *CIService) GetFileHash(ctx context.Context, name string, opt *GetFileHashOptions) (*GetFileHashResult, *Response, error) {
	result := &GetFileHashResult{}
	sopt := &sendOptions{
		baseURL:  s.client.BaseURL.BucketURL,
		uri:      "/" + encodeURIComponent(name),
		method:   http.MethodGet,
		optQuery: opt,
		result:   result,
	}
	resp, err := s.client.send(ctx, sopt)
	return result, resp, err
}

@ -49,23 +49,27 @@ type Container struct {
// Video TODO
type Video struct {
Codec string `xml:"Codec"`
Width string `xml:"Width,omitempty"`
Height string `xml:"Height,omitempty"`
Fps string `xml:"Fps,omitempty"`
Remove string `xml:"Remove,omitempty"`
Profile string `xml:"Profile,omitempty"`
Bitrate string `xml:"Bitrate,omitempty"`
Crf string `xml:"Crf,omitempty"`
Gop string `xml:"Gop,omitempty"`
Preset string `xml:"Preset,omitempty"`
Bufsize string `xml:"Bufsize,omitempty"`
Maxrate string `xml:"Maxrate,omitempty"`
HlsTsTime string `xml:"HlsTsTime,omitempty"`
DashSegment string `xml:"DashSegment,omitempty"`
Pixfmt string `xml:"Pixfmt,omitempty"`
LongShortMode string `xml:"LongShortMode,omitempty"`
Rotate string `xml:"Rotate,omitempty"`
Codec string `xml:"Codec"`
Width string `xml:"Width,omitempty"`
Height string `xml:"Height,omitempty"`
Fps string `xml:"Fps,omitempty"`
Remove string `xml:"Remove,omitempty"`
Profile string `xml:"Profile,omitempty"`
Bitrate string `xml:"Bitrate,omitempty"`
Crf string `xml:"Crf,omitempty"`
Gop string `xml:"Gop,omitempty"`
Preset string `xml:"Preset,omitempty"`
Bufsize string `xml:"Bufsize,omitempty"`
Maxrate string `xml:"Maxrate,omitempty"`
HlsTsTime string `xml:"HlsTsTime,omitempty"`
DashSegment string `xml:"DashSegment,omitempty"`
Pixfmt string `xml:"Pixfmt,omitempty"`
LongShortMode string `xml:"LongShortMode,omitempty"`
Rotate string `xml:"Rotate,omitempty"`
AnimateOnlyKeepKeyFrame string `xml:"AnimateOnlyKeepKeyFrame"`
AnimateTimeIntervalOfFrame string `xml:"AnimateTimeIntervalOfFrame"`
AnimateFramesPerSecond string `xml:"AnimateFramesPerSecond"`
Quality string `xml:"Quality"`
}
// TranscodeProVideo TODO
@ -125,7 +129,7 @@ type Transcode struct {
TimeInterval *TimeInterval `xml:"TimeInterval,omitempty"`
Audio *Audio `xml:"Audio,omitempty"`
TransConfig *TransConfig `xml:"TransConfig,omitempty"`
AudioMix []AudioMix `xml:"AudioMix,omitempty"`
AudioMix *AudioMix `xml:"AudioMix,omitempty"`
}
// Image TODO
@ -241,7 +245,7 @@ type ConcatTemplate struct {
Video *Video `xml:"Video,omitempty"`
Container *Container `xml:"Container,omitempty"`
Index string `xml:"Index,omitempty"`
AudioMix []AudioMix `xml:"AudioMix,omitempty"`
AudioMix *AudioMix `xml:"AudioMix,omitempty"`
}
// SpriteSnapshotConfig TODO
@ -322,7 +326,7 @@ type VideoMontage struct {
Video *VideoMontageVideo `xml:"Video,omitempty"`
Audio *Audio `xml:"Audio,omitempty"`
Duration string `xml:"Duration,omitempty"`
AudioMix []AudioMix `xml:"AudioMix,omitempty"`
AudioMix *AudioMix `xml:"AudioMix,omitempty"`
}
// AudioConfig TODO
@ -469,6 +473,11 @@ type QualityEstimate struct {
Score string `xml:"Score,omitempty"`
}
// QualityEstimate TODO
type QualityEstimateConfig struct {
Rotate string `xml:"Rotate,omitempty"`
}
// MediaResult TODO
type MediaResult struct {
OutputFile struct {
@ -633,6 +642,7 @@ type MediaProcessJobOperation struct {
Translation *Translation `xml:"Translation,omitempty"`
WordsGeneralize *WordsGeneralize `xml:"WordsGeneralize,omitempty"`
WordsGeneralizeResult *WordsGeneralizeResult `xml:"WordsGeneralizeResult,omitempty"`
QualityEstimateConfig *QualityEstimateConfig `xml:"QualityEstimateConfig,omitempty"`
}
// CreatePicJobsOptions TODO
@ -1771,7 +1781,7 @@ type CreateMediaTranscodeTemplateOptions struct {
Audio *Audio `xml:"Audio,omitempty"`
TimeInterval *TimeInterval `xml:"TimeInterval,omitempty"`
TransConfig *TransConfig `xml:"TransConfig,omitempty"`
AudioMix []AudioMix `xml:"AudioMix,omitempty"`
AudioMix *AudioMix `xml:"AudioMix,omitempty"`
}
// CreateMediaAnimationTemplateOptions TODO
@ -1810,7 +1820,7 @@ type CreateMediaVideoMontageTemplateOptions struct {
Container *Container `xml:"Container,omitempty"`
Video *Video `xml:"Video,omitempty"`
Audio *Audio `xml:"Audio,omitempty"`
AudioMix []AudioMix `xml:"AudioMix,omitempty"`
AudioMix *AudioMix `xml:"AudioMix,omitempty"`
}
// CreateMediaVoiceSeparateTemplateOptions TODO

@ -24,7 +24,7 @@ import (
const (
// Version current go sdk version
Version = "0.7.40"
Version = "0.7.41"
UserAgent = "cos-go-sdk-v5/" + Version
contentTypeXML = "application/xml"
defaultServiceBaseURL = "http://service.cos.myqcloud.com"

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 UPYUN
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -1,84 +0,0 @@
package upyun
import (
"fmt"
"sort"
"strings"
)
// RESTAuthConfig collects the request fields signed by MakeRESTAuth
// (the legacy MD5 REST-API signature).
type RESTAuthConfig struct {
	Method    string
	Uri       string
	DateStr   string
	LengthStr string
}

// PurgeAuthConfig collects the fields signed by MakePurgeAuth for the CDN
// purge endpoint.
type PurgeAuthConfig struct {
	PurgeList string
	DateStr   string
}

// UnifiedAuthConfig collects the fields signed by MakeUnifiedAuth, the
// HMAC-SHA1 signature scheme; empty fields are skipped when signing.
type UnifiedAuthConfig struct {
	Method     string
	Uri        string
	DateStr    string
	Policy     string
	ContentMD5 string
}
// MakeRESTAuth builds the legacy REST authorization header value:
// "UpYun <operator>:MD5(method&uri&date&length&password)".
func (u *UpYun) MakeRESTAuth(config *RESTAuthConfig) string {
	payload := config.Method + "&" + config.Uri + "&" + config.DateStr +
		"&" + config.LengthStr + "&" + u.Password
	return "UpYun " + u.Operator + ":" + md5Str(payload)
}
// MakePurgeAuth builds the authorization header for the purge API:
// "UpYun <bucket>:<operator>:MD5(purgeList&bucket&date&password)".
func (u *UpYun) MakePurgeAuth(config *PurgeAuthConfig) string {
	payload := config.PurgeList + "&" + u.Bucket + "&" + config.DateStr + "&" + u.Password
	return "UpYun " + u.Bucket + ":" + u.Operator + ":" + md5Str(payload)
}
// MakeFormAuth signs a form-upload policy with the legacy scheme:
// MD5(base64(policy)&secret).
func (u *UpYun) MakeFormAuth(policy string) string {
	return md5Str(base64ToStr([]byte(policy)) + "&" + u.Secret)
}
// MakeProcessAuth signs a pretreatment (process) API request. The kwargs
// are concatenated as key+value pairs in ascending key order, and the
// signature is MD5(operator + pairs + password).
func (u *UpYun) MakeProcessAuth(kwargs map[string]string) string {
	// pre-size the key slice and use a Builder instead of string += in a
	// loop (the original was quadratic in the number of kwargs).
	keys := make([]string, 0, len(kwargs))
	for k := range kwargs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var auth strings.Builder
	for _, k := range keys {
		auth.WriteString(k)
		auth.WriteString(kwargs[k])
	}
	return fmt.Sprintf("UpYun %s:%s", u.Operator, md5Str(u.Operator+auth.String()+u.Password))
}
// MakeUnifiedAuth builds the HMAC-SHA1 authorization header: the
// non-empty fields of config, joined with "&", are signed with the
// operator password and base64-encoded as "UpYun <operator>:<signature>".
func (u *UpYun) MakeUnifiedAuth(config *UnifiedAuthConfig) string {
	parts := make([]string, 0, 5)
	for _, field := range []string{
		config.Method,
		config.Uri,
		config.DateStr,
		config.Policy,
		config.ContentMD5,
	} {
		if field != "" {
			parts = append(parts, field)
		}
	}
	digest := hmacSha1(u.Password, []byte(strings.Join(parts, "&")))
	return "UpYun " + u.Operator + ":" + base64ToStr(digest)
}

@ -1,78 +0,0 @@
package upyun
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
)
// Error is the structured error returned by the UpYun API. Code, Message
// and RequestID are decoded from the JSON error body when possible;
// StatusCode, Header and Body mirror the raw HTTP response, and Operation
// records which SDK call failed (set by errorOperation).
type Error struct {
	Code       int    `json:"code"`
	Message    string `json:"msg"`
	RequestID  string `json:"id"`
	Operation  string
	StatusCode int
	Header     http.Header
	Body       []byte
}
// Error implements the error interface.
// NOTE(review): it lazily fills in a default Operation ("upyun api"),
// mutating the receiver on first call.
func (e *Error) Error() string {
	if e.Operation == "" {
		e.Operation = "upyun api"
	}
	return fmt.Sprintf("%s error: status=%d, code=%d, message=%s, request-id=%s",
		e.Operation, e.StatusCode, e.Code, e.Message, e.RequestID)
}
// checkResponse returns nil for any 2xx response. Otherwise it drains and
// closes the body and builds an *Error from the status, headers and —
// when the body parses as JSON — the error payload fields.
func checkResponse(res *http.Response) error {
	if res.StatusCode >= 200 && res.StatusCode <= 299 {
		return nil
	}
	uerr := new(Error)
	uerr.StatusCode = res.StatusCode
	uerr.Header = res.Header
	defer res.Body.Close()
	slurp, err := ioutil.ReadAll(res.Body)
	if err != nil {
		// body unreadable: return the status-only error
		return uerr
	}
	uerr.Body = slurp
	// best effort: a non-JSON body leaves Code/Message/RequestID zeroed
	json.Unmarshal(slurp, uerr)
	return uerr
}
// checkStatusCode reports whether err is an *Error carrying the given
// HTTP status code. Wrapped errors are NOT unwrapped (direct type
// assertion only, matching the original behavior).
func checkStatusCode(err error, status int) bool {
	if err == nil {
		return false
	}
	if apiErr, ok := err.(*Error); ok {
		return apiErr.StatusCode == status
	}
	return false
}
// IsNotExist reports whether err is an UpYun API 404 (object not found).
func IsNotExist(err error) bool {
	return checkStatusCode(err, http.StatusNotFound)
}

// IsNotModified reports whether err is an UpYun API 304.
func IsNotModified(err error) bool {
	return checkStatusCode(err, http.StatusNotModified)
}

// IsTooManyRequests reports whether err is an UpYun API 429 (rate limited).
func IsTooManyRequests(err error) bool {
	return checkStatusCode(err, http.StatusTooManyRequests)
}
// errorOperation tags err with the name of the failing SDK operation.
// An *Error gets its Operation field set and is returned as-is; any other
// error is wrapped with %w; a nil err yields a plain error holding op.
func errorOperation(op string, err error) error {
	switch e := err.(type) {
	case nil:
		return errors.New(op)
	case *Error:
		e.Operation = op
		return e
	default:
		return fmt.Errorf("%s: %w", op, err)
	}
}

@ -1,72 +0,0 @@
package upyun
import (
"net/http"
"strings"
"time"
)
// FileInfo describes one UpYun object, populated from response headers by
// parseHeaderToFileInfo.
type FileInfo struct {
	Name        string
	Size        int64
	ContentType string
	IsDir       bool
	IsEmptyDir  bool
	MD5         string
	Time        time.Time
	Meta        map[string]string // x-upyun-meta-* headers; keys stored lowercased

	/* image information */
	ImgType   string
	ImgWidth  int64
	ImgHeight int64
	ImgFrames int64
}
/*
Content-Type: image/gif
ETag: "dc9ea7257aa6da18e74505259b04a946"
x-upyun-file-type: GIF
x-upyun-height: 379
x-upyun-width: 500
x-upyun-frames: 90
*/
// parseHeaderToFileInfo builds a FileInfo from response headers.
// getinfo selects the header layout: true for a HEAD/getinfo response
// (x-upyun-file-* headers), false for a GET/download response
// (Content-Length / Content-Md5 / Last-Modified plus image-info headers).
func parseHeaderToFileInfo(header http.Header, getinfo bool) *FileInfo {
	fInfo := &FileInfo{}

	// collect user metadata; NOTE(review): keys are stored lowercased and
	// keep the full "x-upyun-meta-" prefix — only the first header value
	// per key is retained
	for k, v := range header {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-upyun-meta-") {
			if fInfo.Meta == nil {
				fInfo.Meta = make(map[string]string)
			}
			fInfo.Meta[lk] = v[0]
		}
	}

	if getinfo {
		// HTTP HEAD
		fInfo.Size = parseStrToInt(header.Get("x-upyun-file-size"))
		fInfo.IsDir = header.Get("x-upyun-file-type") == "folder"
		fInfo.Time = time.Unix(parseStrToInt(header.Get("x-upyun-file-date")), 0)
		fInfo.ContentType = header.Get("Content-Type")
		fInfo.MD5 = header.Get("Content-MD5")
	} else {
		fInfo.Size = parseStrToInt(header.Get("Content-Length"))
		fInfo.ContentType = header.Get("Content-Type")
		// prefer Content-Md5; fall back to the ETag with quotes stripped
		fInfo.MD5 = strings.ReplaceAll(header.Get("Content-Md5"), "\"", "")
		if fInfo.MD5 == "" {
			fInfo.MD5 = strings.ReplaceAll(header.Get("Etag"), "\"", "")
		}
		lastM := header.Get("Last-Modified")
		t, err := http.ParseTime(lastM)
		if err == nil {
			fInfo.Time = t
		}
		fInfo.ImgType = header.Get("x-upyun-file-type")
		fInfo.ImgWidth = parseStrToInt(header.Get("x-upyun-width"))
		fInfo.ImgHeight = parseStrToInt(header.Get("x-upyun-height"))
		fInfo.ImgFrames = parseStrToInt(header.Get("x-upyun-frames"))
	}
	return fInfo
}

@ -1,148 +0,0 @@
package upyun
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"time"
)
// FormUploadConfig configures a browser-style form upload. The typed
// convenience fields (SaveKey, NotifyUrl, …) are folded into Options by
// Format before the policy is signed.
type FormUploadConfig struct {
	LocalPath      string
	SaveKey        string
	ExpireAfterSec int64
	NotifyUrl      string
	Apps           []map[string]interface{}
	Options        map[string]interface{}
}

// FormUploadResp is the JSON response of the form-upload API.
type FormUploadResp struct {
	Code      int      `json:"code"`
	Msg       string   `json:"message"`
	Url       string   `json:"url"`
	Timestamp int64    `json:"time"`
	ImgWidth  int      `json:"image-width"`
	ImgHeight int      `json:"image-height"`
	ImgFrames int      `json:"image-frames"`
	ImgType   string   `json:"image-type"`
	Sign      string   `json:"sign"`
	Taskids   []string `json:"task_ids"`
}
// Format folds the typed convenience fields into the Options map that is
// serialized into the upload policy. Pre-existing Options entries are
// kept; zero-valued fields are skipped.
func (config *FormUploadConfig) Format() {
	if config.Options == nil {
		config.Options = map[string]interface{}{}
	}
	if key := config.SaveKey; key != "" {
		config.Options["save-key"] = key
	}
	if url := config.NotifyUrl; url != "" {
		config.Options["notify-url"] = url
	}
	if config.ExpireAfterSec > 0 {
		config.Options["expiration"] = time.Now().Unix() + config.ExpireAfterSec
	}
	if len(config.Apps) != 0 {
		config.Options["apps"] = config.Apps
	}
}
// FormUpload uploads the file at config.LocalPath through the form-upload
// (v0.api) endpoint. The signing scheme depends on up.deprecated: the
// legacy MD5 form signature, or the unified HMAC-SHA1 signature.
func (up *UpYun) FormUpload(config *FormUploadConfig) (*FormUploadResp, error) {
	config.Format()
	config.Options["bucket"] = up.Bucket
	args, err := json.Marshal(config.Options)
	if err != nil {
		return nil, err
	}
	// the policy is the base64-encoded JSON options document
	policy := base64ToStr(args)

	formValues := make(map[string]string)
	formValues["policy"] = policy
	formValues["file"] = config.LocalPath
	if up.deprecated {
		formValues["signature"] = up.MakeFormAuth(policy)
	} else {
		sign := &UnifiedAuthConfig{
			Method: "POST",
			Uri:    "/" + up.Bucket,
			Policy: policy,
		}
		// optional date / content-md5 entries of Options join the signature;
		// NOTE(review): the type assertions panic if a caller stored a
		// non-string under these keys
		if v, ok := config.Options["date"]; ok {
			sign.DateStr = v.(string)
		}
		if v, ok := config.Options["content-md5"]; ok {
			sign.ContentMD5 = v.(string)
		}
		formValues["authorization"] = up.MakeUnifiedAuth(sign)
	}

	endpoint := up.doGetEndpoint("v0.api.upyun.com")
	url := fmt.Sprintf("http://%s/%s", endpoint, up.Bucket)
	resp, err := up.doFormRequest(url, formValues)
	if err != nil {
		return nil, err
	}
	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return nil, errorOperation("form read body", err)
	}
	var r FormUploadResp
	err = json.Unmarshal(b, &r)
	return &r, err
}
// doFormRequest streams a multipart/form-data POST without buffering the
// file in memory: the multipart prologue (all non-file fields plus the
// file-part header) is built into formBody, then the open file and the
// closing boundary (bdBuf) are chained behind it with io.MultiReader.
// Content-Length is computed up front from the three pieces.
func (up *UpYun) doFormRequest(url string, formValues map[string]string) (*http.Response, error) {
	formBody := &bytes.Buffer{}
	formWriter := multipart.NewWriter(formBody)
	// Close is deferred only to release the writer; the terminating
	// boundary actually sent on the wire is the one built in bdBuf below.
	defer formWriter.Close()

	for k, v := range formValues {
		if k != "file" {
			formWriter.WriteField(k, v)
		}
	}
	boundary := formWriter.Boundary()
	bdBuf := bytes.NewBufferString(fmt.Sprintf("\r\n--%s--\r\n", boundary))

	fpath := formValues["file"]
	fd, err := os.Open(fpath)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	fInfo, err := fd.Stat()
	if err != nil {
		return nil, err
	}
	// CreateFormFile writes only the part header into formBody; the file
	// bytes themselves are streamed from fd by the MultiReader below
	_, err = formWriter.CreateFormFile("file", filepath.Base(fpath))
	if err != nil {
		return nil, err
	}

	headers := map[string]string{
		"Content-Type":   "multipart/form-data; boundary=" + boundary,
		"Content-Length": fmt.Sprint(formBody.Len() + int(fInfo.Size()) + bdBuf.Len()),
	}
	body := io.MultiReader(formBody, fd, bdBuf)
	resp, err := up.doHTTPRequest("POST", url, headers, body)
	if err != nil {
		return nil, errorOperation("form", err)
	}
	return resp, nil
}

@ -1,82 +0,0 @@
package upyun
import (
"bytes"
"io"
"net/http"
"os"
"strconv"
"strings"
)
// doHTTPRequest performs one request with the SDK's shared HTTP client.
// A headers entry whose key is "host" (any case) sets req.Host rather
// than a header. For PUT/POST it back-fills req.ContentLength — which
// http.NewRequest only infers for a few reader types — and normalizes a
// known-zero-length body to nil (per net/http, ContentLength 0 with a
// non-nil Body means "unknown"). Non-2xx responses are converted to
// *Error by checkResponse.
func (up *UpYun) doHTTPRequest(method, url string, headers map[string]string,
	body io.Reader) (resp *http.Response, err error) {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}

	for k, v := range headers {
		if strings.ToLower(k) == "host" {
			req.Host = v
		} else {
			req.Header.Set(k, v)
		}
	}
	req.Header.Set("User-Agent", up.UserAgent)

	if method == "PUT" || method == "POST" {
		found := false
		length := req.Header.Get("Content-Length")
		if length != "" {
			// an explicit Content-Length header wins
			req.ContentLength, _ = strconv.ParseInt(length, 10, 64)
			found = true
		} else {
			// infer the length for reader types NewRequest doesn't know
			switch v := body.(type) {
			case *os.File:
				if fInfo, err := v.Stat(); err == nil {
					req.ContentLength = fInfo.Size()
					found = true
				}
			case UpYunPutReader:
				req.ContentLength = int64(v.Len())
				found = true
			case *bytes.Buffer:
				req.ContentLength = int64(v.Len())
				found = true
			case *bytes.Reader:
				req.ContentLength = int64(v.Len())
				found = true
			case *strings.Reader:
				req.ContentLength = int64(v.Len())
				found = true
			case *io.LimitedReader:
				req.ContentLength = v.N
				found = true
			}
		}
		if found && req.ContentLength == 0 {
			req.Body = nil
		}
	}
	// fmt.Printf("%+v\n", req)

	resp, err = up.httpc.Do(req)
	if err != nil {
		return nil, err
	}
	err = checkResponse(resp)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// doGetEndpoint resolves host through the user-configured Hosts override
// map, falling back to host itself when no override is present.
func (up *UpYun) doGetEndpoint(host string) string {
	if override := up.Hosts[host]; override != "" {
		return override
	}
	return host
}

@ -1,78 +0,0 @@
package upyun
import (
"fmt"
"io"
"os"
)
// UpYunPutReader is the reader contract used for resumable uploads: in
// addition to plain Read, the SDK can query the part length, its MD5, and
// the number of bytes copied so far.
type UpYunPutReader interface {
	Len() (n int)
	MD5() (ret string)
	Read([]byte) (n int, err error)
	Copyed() (n int)
}
// fragmentFile presents a window of realFile as a sequential reader for
// one multipart part. offset is the absolute start of the window in the
// file; cursor is the read position relative to the window; Read stops
// once cursor reaches limit.
// NOTE(review): resumePut passes limit = part size, which Read treats as
// the window LENGTH, yet Len() and Copyed() subtract offset — for any
// fragment with offset > 0 those look inconsistent. Confirm the intended
// semantics before relying on them (UploadPart sets Content-Length
// explicitly, so Len() is not exercised on that path).
type fragmentFile struct {
	realFile *os.File
	offset   int64
	limit    int64
	cursor   int64
}

// Seek supports only whence == 0; offset is relative to the window start.
// It repositions the underlying file accordingly.
func (f *fragmentFile) Seek(offset int64, whence int) (ret int64, err error) {
	switch whence {
	case 0:
		f.cursor = offset
		ret, err = f.realFile.Seek(f.offset+f.cursor, 0)
		return ret - f.offset, err
	default:
		return 0, fmt.Errorf("whence must be 0")
	}
}

// Read reads from the underlying file, truncating the count so the total
// never passes limit, and reports EOF at the window end.
func (f *fragmentFile) Read(b []byte) (n int, err error) {
	if f.cursor >= f.limit {
		return 0, io.EOF
	}
	n, err = f.realFile.Read(b)
	if f.cursor+int64(n) > f.limit {
		n = int(f.limit - f.cursor)
	}
	f.cursor += int64(n)
	return n, err
}

// Stat is intentionally unimplemented.
func (f *fragmentFile) Stat() (fInfo os.FileInfo, err error) {
	return fInfo, fmt.Errorf("fragmentFile not implement Stat()")
}

// Close is a no-op: the underlying file is owned by the caller.
func (f *fragmentFile) Close() error {
	return nil
}

// Copyed returns cursor-offset (see the NOTE(review) on fragmentFile
// about the offset subtraction).
func (f *fragmentFile) Copyed() int {
	return int(f.cursor - f.offset)
}

// Len returns limit-offset (see the NOTE(review) on fragmentFile).
func (f *fragmentFile) Len() int {
	return int(f.limit - f.offset)
}

// MD5 hashes the fragment from the current position via Read.
func (f *fragmentFile) MD5() string {
	s, _ := md5File(f)
	return s
}

// newFragmentFile wraps file in a fragmentFile window starting at offset
// and seeks to the window start.
func newFragmentFile(file *os.File, offset, limit int64) (*fragmentFile, error) {
	f := &fragmentFile{
		realFile: file,
		offset:   offset,
		limit:    limit,
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	return f, nil
}

@ -1,230 +0,0 @@
package upyun
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"path"
"strings"
"time"
)
// CommitTasksConfig describes a batch of async pretreatment tasks
// submitted via CommitTasks.
type CommitTasksConfig struct {
	AppName   string
	Accept    string
	Source    string
	NotifyUrl string
	Tasks     []interface{}
}

// LiveauditCreateTask starts a live-stream audit task (CommitSyncTasks).
type LiveauditCreateTask struct {
	Source    string
	SaveAs    string
	NotifyUrl string
	Interval  string
	Resize    string
}

// LiveauditCancelTask cancels a live-stream audit task by id.
type LiveauditCancelTask struct {
	TaskId string
}

// SyncCommonTask is an escape hatch for any synchronous process API:
// TaskUri is appended to the bucket path and Kwargs is the JSON payload.
type SyncCommonTask struct {
	Kwargs  map[string]interface{}
	TaskUri string
}
// CommitTasks submits a batch of async pretreatment tasks and returns the
// server-assigned task ids.
func (up *UpYun) CommitTasks(config *CommitTasksConfig) (taskIds []string, err error) {
	b, err := json.Marshal(config.Tasks)
	if err != nil {
		return nil, err
	}
	kwargs := map[string]string{
		"app_name": config.AppName,
		// the task list travels base64-encoded
		"tasks":      base64ToStr(b),
		"notify_url": config.NotifyUrl,
		// for naga
		"source": config.Source,
	}
	if config.Accept != "" {
		kwargs["accept"] = config.Accept
	}
	err = up.doProcessRequest("POST", "/pretreatment/", kwargs, &taskIds)
	return
}
// GetProgress queries the progress of the given async tasks; the result
// maps task id to progress value.
func (up *UpYun) GetProgress(taskIds []string) (result map[string]int, err error) {
	query := map[string]string{
		"task_ids": strings.Join(taskIds, ","),
	}
	var decoded map[string]map[string]int
	if err = up.doProcessRequest("GET", "/status/", query, &decoded); err != nil {
		return nil, err
	}
	if tasks, ok := decoded["tasks"]; ok {
		return tasks, nil
	}
	return nil, fmt.Errorf("no tasks")
}
// GetResult fetches the final results of the given async tasks, keyed by
// task id.
func (up *UpYun) GetResult(taskIds []string) (result map[string]interface{}, err error) {
	query := map[string]string{
		"task_ids": strings.Join(taskIds, ","),
	}
	decoded := make(map[string]map[string]interface{})
	if err = up.doProcessRequest("GET", "/result/", query, &decoded); err != nil {
		return nil, err
	}
	tasks, ok := decoded["tasks"]
	if !ok {
		return nil, fmt.Errorf("no tasks")
	}
	return tasks, nil
}
// doProcessRequest calls the async pretreatment API on p0.api.upyun.com.
// kwargs always gains a "service" entry (the bucket). GET folds kwargs
// into the query string; POST sends them form-encoded. The JSON response
// is decoded into v.
func (up *UpYun) doProcessRequest(method, uri string,
	kwargs map[string]string, v interface{}) error {
	if _, ok := kwargs["service"]; !ok {
		kwargs["service"] = up.Bucket
	}
	if method == "GET" {
		// the query must be folded into uri BEFORE signing: the signature
		// below covers uri
		uri = addQueryToUri(uri, kwargs)
	}

	headers := make(map[string]string)
	headers["Date"] = makeRFC1123Date(time.Now())
	headers["Content-Type"] = "application/x-www-form-urlencoded"
	if up.deprecated {
		headers["Authorization"] = up.MakeProcessAuth(kwargs)
	} else {
		headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{
			Method:  method,
			Uri:     uri,
			DateStr: headers["Date"],
		})
	}

	var resp *http.Response
	var err error
	endpoint := up.doGetEndpoint("p0.api.upyun.com")
	rawurl := fmt.Sprintf("http://%s%s", endpoint, uri)
	switch method {
	case "GET":
		resp, err = up.doHTTPRequest(method, rawurl, headers, nil)
	case "POST":
		payload := encodeQueryToPayload(kwargs)
		resp, err = up.doHTTPRequest(method, rawurl, headers, bytes.NewBufferString(payload))
	default:
		return fmt.Errorf("Unknown method")
	}
	if err != nil {
		return errorOperation("process", err)
	}

	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return errorOperation("process read body", err)
	}
	return json.Unmarshal(b, v)
}
// CommitSyncTasks dispatches one synchronous process task. The concrete
// type of commitTask selects the endpoint: LiveauditCreateTask and
// LiveauditCancelTask map to the liveaudit endpoints, SyncCommonTask to
// an arbitrary TaskUri. The decoded JSON response is returned.
func (up *UpYun) CommitSyncTasks(commitTask interface{}) (result map[string]interface{}, err error) {
	var kwargs map[string]interface{}
	var uri string
	var payload string

	switch taskConfig := commitTask.(type) {
	case LiveauditCreateTask:
		kwargs = map[string]interface{}{
			"source":     taskConfig.Source,
			"save_as":    taskConfig.SaveAs,
			"notify_url": taskConfig.NotifyUrl,
			"service":    up.Bucket,
		}
		// interval and resize are optional
		if taskConfig.Interval != "" {
			kwargs["interval"] = taskConfig.Interval
		}
		if taskConfig.Resize != "" {
			kwargs["resize"] = taskConfig.Resize
		}
		uri = path.Join("/", up.Bucket, "/liveaudit/create")
	case LiveauditCancelTask:
		kwargs = map[string]interface{}{
			"task_id": taskConfig.TaskId,
			"service": up.Bucket,
		}
		uri = path.Join("/", up.Bucket, "/liveaudit/cancel")
	case SyncCommonTask:
		kwargs = taskConfig.Kwargs
		uri = path.Join("/", up.Bucket, taskConfig.TaskUri)
	default:
		err = fmt.Errorf("don't match any task")
		return nil, err
	}

	// make sure the bucket is always identified in the payload
	if _, exist := kwargs["service"]; !exist {
		kwargs["service"] = up.Bucket
	}
	body, err := json.Marshal(kwargs)
	if err != nil {
		return nil, fmt.Errorf("can't encode the json")
	}
	payload = string(body)
	return up.doSyncProcessRequest("POST", uri, payload)
}
// doSyncProcessRequest performs a synchronous process-API POST against
// p1.api.upyun.com with a JSON payload, signing with the unified
// HMAC-SHA1 scheme (Content-MD5 included), and returns the decoded JSON
// response body.
func (up *UpYun) doSyncProcessRequest(method, uri string, payload string) (map[string]interface{}, error) {
	headers := make(map[string]string)
	headers["Date"] = makeRFC1123Date(time.Now())
	headers["Content-Type"] = "application/json"
	headers["Content-MD5"] = md5Str(payload)
	headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{
		Method:     method,
		Uri:        uri,
		DateStr:    headers["Date"],
		ContentMD5: headers["Content-MD5"],
	})

	var resp *http.Response
	var err error
	endpoint := up.doGetEndpoint("p1.api.upyun.com")
	rawurl := fmt.Sprintf("http://%s%s", endpoint, uri)
	switch method {
	case "POST":
		resp, err = up.doHTTPRequest(method, rawurl, headers, strings.NewReader(payload))
	default:
		return nil, fmt.Errorf("Unknown method")
	}
	if err != nil {
		return nil, errorOperation("sync process", err)
	}

	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		// wrap for consistency with doProcessRequest
		return nil, errorOperation("sync process read body", err)
	}

	var v map[string]interface{}
	if err = json.Unmarshal(b, &v); err != nil {
		// BUG FIX: this used to fmt.Println the raw body to stdout and
		// return a partially-nil map; report the failure through the
		// returned error instead of leaking to the console.
		return nil, errorOperation("sync process unmarshal", err)
	}
	return v, nil
}

@ -1,53 +0,0 @@
package upyun
import (
"encoding/json"
"io/ioutil"
URL "net/url"
"strings"
"time"
)
// TODO
// Purge asks the CDN to invalidate the given URLs. It returns the subset
// the server rejected as "invalid_domain_of_url"; err is nil whenever the
// purge request itself succeeded, even if some URLs were rejected.
func (up *UpYun) Purge(urls []string) (fails []string, err error) {
	purge := "http://purge.upyun.com/purge/"

	date := makeRFC1123Date(time.Now())
	// URLs travel newline-separated and unescaped; the same string is
	// both signed and sent as the form value
	purgeList := unescapeUri(strings.Join(urls, "\n"))
	headers := map[string]string{
		"Date": date,
		"Authorization": up.MakePurgeAuth(&PurgeAuthConfig{
			PurgeList: purgeList,
			DateStr:   date,
		}),
		"Content-Type": "application/x-www-form-urlencoded;charset=utf-8",
	}

	form := make(URL.Values)
	form.Add("purge", purgeList)
	body := strings.NewReader(form.Encode())
	resp, err := up.doHTTPRequest("POST", purge, headers, body)
	if err != nil {
		return fails, errorOperation("purge", err)
	}
	defer resp.Body.Close()

	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fails, errorOperation("purge read body", err)
	}
	result := map[string]interface{}{}
	if err := json.Unmarshal(content, &result); err != nil {
		return fails, err
	}
	if it, ok := result["invalid_domain_of_url"]; ok {
		if urls, ok := it.([]interface{}); ok {
			for _, url := range urls {
				fails = append(fails, url.(string))
			}
		}
	}
	return fails, nil
}

@ -1,741 +0,0 @@
package upyun
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"path"
"strconv"
"strings"
"time"
)
const (
	// DefaultPartSize is the multipart chunk size (1 MiB); user-supplied
	// part sizes must be a positive multiple of it (see getPartInfo).
	DefaultPartSize = 1024 * 1024
	// MaxPartNum caps the number of parts in one multipart upload.
	MaxPartNum = 10000
	// minResumePutFileSize: files smaller than this (10 MiB) are uploaded
	// with a plain PUT instead of the resumable multipart protocol.
	minResumePutFileSize = 10 * 1024 * 1024
)
// restReqConfig bundles the parameters of one internal REST-API request.
type restReqConfig struct {
	method    string
	uri       string
	query     string
	headers   map[string]string
	closeBody bool
	httpBody  io.Reader
	useMD5    bool
}

// GetObjectConfig provides a configuration to the Get method. Content is
// written to LocalPath (a file Get creates) or to Writer.
type GetObjectConfig struct {
	Path string
	// Headers contains custom http header, like User-Agent.
	Headers   map[string]string
	LocalPath string
	Writer    io.Writer
}

// GetObjectsConfig provides a configuration to the List method.
type GetObjectsConfig struct {
	Path           string
	Headers        map[string]string
	ObjectsChan    chan *FileInfo
	QuitChan       chan bool
	MaxListObjects int
	MaxListTries   int
	// MaxListLevel: depth of recursion
	MaxListLevel int
	// DescOrder: whether list objects by desc-order
	DescOrder bool

	rootDir string
	level   int
	objNum  int
	try     int
}

// PutObjectConfig provides a configuration to the Put method. Content
// comes from LocalPath or Reader; UseResumeUpload selects the multipart
// protocol for large files.
type PutObjectConfig struct {
	Path            string
	LocalPath       string
	Reader          io.Reader
	Headers         map[string]string
	UseMD5          bool
	UseResumeUpload bool
	// Append Api Deprecated
	// AppendContent     bool
	ResumePartSize    int64
	MaxResumePutTries int
}

// MoveObjectConfig configures Move (rename within the bucket).
type MoveObjectConfig struct {
	SrcPath  string
	DestPath string
	Headers  map[string]string
}

// CopyObjectConfig configures Copy.
type CopyObjectConfig struct {
	SrcPath  string
	DestPath string
	Headers  map[string]string
}

// UploadPartConfig describes one part of a multipart upload.
type UploadPartConfig struct {
	Reader   io.Reader
	PartSize int64
	PartID   int
}

// CompleteMultipartUploadConfig optionally carries a whole-file MD5 for
// server-side verification when completing a multipart upload.
type CompleteMultipartUploadConfig struct {
	Md5 string
}

// InitMultipartUploadConfig configures InitMultipartUpload.
type InitMultipartUploadConfig struct {
	Path          string
	PartSize      int64
	ContentLength int64 // optional
	ContentType   string
	OrderUpload   bool
}

// InitMultipartUploadResult identifies an in-progress multipart upload.
type InitMultipartUploadResult struct {
	UploadID string
	Path     string
	PartSize int64
}

// DeleteObjectConfig configures object-deletion requests.
type DeleteObjectConfig struct {
	Path   string
	Async  bool
	Folder bool // optional
}

// ModifyMetadataConfig configures metadata modification of an object.
type ModifyMetadataConfig struct {
	Path      string
	Operation string
	Headers   map[string]string
}

// ListMultipartConfig filters ListMultipartUploads.
type ListMultipartConfig struct {
	Prefix string
	Limit  int64
}

// ListMultipartPartsConfig selects where ListMultipartParts starts.
type ListMultipartPartsConfig struct {
	BeginID int
}

// MultipartUploadFile is one entry of ListMultipartUploadResult.
type MultipartUploadFile struct {
	Key       string `json:"key"`
	UUID      string `json:"uuid"`
	Completed bool   `json:"completed"`
	CreatedAt int64  `json:"created_at"`
}

// ListMultipartUploadResult is the response of ListMultipartUploads.
type ListMultipartUploadResult struct {
	Files []*MultipartUploadFile `json:"files"`
}

// MultipartUploadedPart is one entry of ListUploadedPartsResult.
type MultipartUploadedPart struct {
	Etag string `json:"etag"`
	Size int64  `json:"size"`
	Id   int    `json:"id"`
}

// ListUploadedPartsResult is the response of ListMultipartParts.
type ListUploadedPartsResult struct {
	Parts []*MultipartUploadedPart `json:"parts"`
}
// Usage returns the total storage used by the bucket, in bytes.
func (up *UpYun) Usage() (n int64, err error) {
	var resp *http.Response
	if resp, err = up.doRESTRequest(&restReqConfig{
		method: "GET",
		uri:    "/",
		query:  "usage",
	}); err != nil {
		return 0, errorOperation("usage", err)
	}
	if n, err = readHTTPBodyToInt(resp); err != nil {
		return 0, errorOperation("usage", err)
	}
	return n, nil
}
// Mkdir creates the directory path on the storage service.
func (up *UpYun) Mkdir(path string) error {
	dirHeaders := map[string]string{
		"folder":         "true",
		"x-upyun-folder": "true",
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:    "POST",
		uri:       path,
		headers:   dirHeaders,
		closeBody: true,
	}); err != nil {
		return errorOperation(fmt.Sprintf("mkdir %s", path), err)
	}
	return nil
}
// TODO: maybe directory
// Get downloads the object at config.Path into config.Writer, or into a
// file created at config.LocalPath when that is set. The returned
// FileInfo is built from the response headers, with Name set to the
// object path and Size replaced by the number of bytes actually copied.
func (up *UpYun) Get(config *GetObjectConfig) (fInfo *FileInfo, err error) {
	if config.LocalPath != "" {
		var fd *os.File
		if fd, err = os.Create(config.LocalPath); err != nil {
			return nil, errorOperation("create file", err)
		}
		defer fd.Close()
		config.Writer = fd
	}

	if config.Writer == nil {
		return nil, errors.New("no writer")
	}

	resp, err := up.doRESTRequest(&restReqConfig{
		method: "GET",
		uri:    config.Path,
	})
	if err != nil {
		return nil, errorOperation(fmt.Sprintf("get %s", config.Path), err)
	}
	defer resp.Body.Close()

	fInfo = parseHeaderToFileInfo(resp.Header, false)
	fInfo.Name = config.Path

	// overwrite the header-derived size with the bytes actually copied
	if fInfo.Size, err = io.Copy(config.Writer, resp.Body); err != nil {
		return nil, errorOperation("io copy", err)
	}
	return
}
// put issues a single PUT of config.Reader to config.Path — the
// non-resumable path; see resumePut for the multipart variant.
func (up *UpYun) put(config *PutObjectConfig) error {
	/* Append Api Deprecated
	if config.AppendContent {
		if config.Headers == nil {
			config.Headers = make(map[string]string)
		}
		config.Headers["X-Upyun-Append"] = "true"
	}
	*/
	_, err := up.doRESTRequest(&restReqConfig{
		method:    "PUT",
		uri:       config.Path,
		headers:   config.Headers,
		closeBody: true,
		httpBody:  config.Reader,
		useMD5:    config.UseMD5,
	})
	if err != nil {
		return errorOperation(fmt.Sprintf("put %s", config.Path), err)
	}
	return nil
}
// getPartInfo validates and normalizes a multipart part size and returns
// (partSize, partNum). partSize <= 0 selects DefaultPartSize; otherwise
// it must be a multiple of DefaultPartSize. partNum is
// ceil(fsize/partSize) and must not exceed MaxPartNum.
func getPartInfo(partSize, fsize int64) (int64, int64, error) {
	if partSize <= 0 {
		partSize = DefaultPartSize
	}
	// error strings lowercased per Go convention (they are usually
	// embedded mid-sentence by callers)
	if partSize < DefaultPartSize {
		return 0, 0, fmt.Errorf("the minimum part size is %d", DefaultPartSize)
	}
	if partSize%DefaultPartSize != 0 {
		return 0, 0, fmt.Errorf("the part size must be a multiple of %d", DefaultPartSize)
	}
	// ceiling division
	partNum := (fsize + partSize - 1) / partSize
	if partNum > MaxPartNum {
		return 0, 0, fmt.Errorf("the maximum part number is %d", MaxPartNum)
	}
	return partSize, partNum, nil
}
// resumePut uploads config.Reader (which must be an *os.File) with the
// multipart protocol: init, sequential part uploads with per-part retries
// (MaxResumePutTries == 0 means retry indefinitely), then complete —
// optionally with a whole-file MD5. Files below minResumePutFileSize fall
// back to a plain PUT.
func (up *UpYun) resumePut(config *PutObjectConfig) error {
	f, ok := config.Reader.(*os.File)
	if !ok {
		return errors.New("resumePut: type != *os.File")
	}
	fileinfo, err := f.Stat()
	if err != nil {
		return errorOperation("stat", err)
	}
	fsize := fileinfo.Size()
	if fsize < minResumePutFileSize {
		// small file: multipart overhead not worth it
		return up.put(config)
	}
	if config.ResumePartSize == 0 {
		config.ResumePartSize = DefaultPartSize
	}
	// id of the last part (parts are numbered from 0)
	maxPartID := int((fsize+config.ResumePartSize-1)/config.ResumePartSize - 1)

	if config.Headers == nil {
		config.Headers = make(map[string]string)
	}
	curSize, partSize := int64(0), config.ResumePartSize
	headers := config.Headers

	uploadInfo, err := up.InitMultipartUpload(&InitMultipartUploadConfig{
		Path:          config.Path,
		PartSize:      partSize,
		ContentType:   headers["Content-Type"],
		ContentLength: fsize,
		OrderUpload:   true,
	})
	if err != nil {
		return err
	}
	for id := 0; id <= maxPartID; id++ {
		if curSize+partSize > fsize {
			// the final part is whatever remains
			partSize = fsize - curSize
		}
		fragFile, err := newFragmentFile(f, curSize, partSize)
		if err != nil {
			return errorOperation("new fragment file", err)
		}

		try := 0
		for ; config.MaxResumePutTries == 0 || try < config.MaxResumePutTries; try++ {
			err = up.UploadPart(uploadInfo, &UploadPartConfig{
				PartID:   id,
				PartSize: partSize,
				Reader:   fragFile,
			})
			if err == nil {
				break
			}
			// rewind the fragment before retrying
			fragFile.Seek(0, 0)
		}
		if config.MaxResumePutTries > 0 && try == config.MaxResumePutTries {
			// retries exhausted: return the last upload error
			return err
		}
		curSize += partSize
	}

	completeConfig := &CompleteMultipartUploadConfig{}
	if config.UseMD5 {
		f.Seek(0, 0)
		completeConfig.Md5, _ = md5File(f)
	}
	return up.CompleteMultipartUpload(uploadInfo, completeConfig)
}
// Put uploads content to config.Path. The content comes from
// config.LocalPath (opened here and closed on return) or config.Reader;
// UseResumeUpload selects resumable multipart upload for large files.
func (up *UpYun) Put(config *PutObjectConfig) (err error) {
	if config.LocalPath != "" {
		fd, openErr := os.Open(config.LocalPath)
		if openErr != nil {
			return errorOperation("open file", openErr)
		}
		defer fd.Close()
		config.Reader = fd
	}
	if config.UseResumeUpload {
		return up.resumePut(config)
	}
	return up.put(config)
}
// Move renames SrcPath to DestPath inside the bucket via a PUT with an
// X-Upyun-Move-Source header. User-supplied Headers are applied last and
// may deliberately override the generated source header.
func (up *UpYun) Move(config *MoveObjectConfig) error {
	reqHeaders := map[string]string{
		"X-Upyun-Move-Source": path.Join("/", up.Bucket, escapeUri(config.SrcPath)),
	}
	for key, val := range config.Headers {
		reqHeaders[key] = val
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:  "PUT",
		uri:     config.DestPath,
		headers: reqHeaders,
	}); err != nil {
		return errorOperation("move source", err)
	}
	return nil
}
// Copy duplicates the object at SrcPath to DestPath within the bucket via
// a PUT with an X-Upyun-Copy-Source header. User-supplied Headers are
// applied last and can override the generated source header.
func (up *UpYun) Copy(config *CopyObjectConfig) error {
	headers := map[string]string{
		"X-Upyun-Copy-Source": path.Join("/", up.Bucket, escapeUri(config.SrcPath)),
	}
	for k, v := range config.Headers {
		headers[k] = v
	}
	_, err := up.doRESTRequest(&restReqConfig{
		method:  "PUT",
		uri:     config.DestPath,
		headers: headers,
	})
	if err != nil {
		return errorOperation("copy source", err)
	}
	return nil
}
// InitMultipartUpload starts a multipart upload (stage "initiate") and
// returns the server-assigned upload id together with the normalized part
// size. OrderUpload == false requests out-of-order ("disorder") parts.
func (up *UpYun) InitMultipartUpload(config *InitMultipartUploadConfig) (*InitMultipartUploadResult, error) {
	partSize, _, err := getPartInfo(config.PartSize, config.ContentLength)
	if err != nil {
		return nil, errorOperation("init multipart", err)
	}

	headers := make(map[string]string)
	headers["X-Upyun-Multi-Type"] = config.ContentType
	if config.ContentLength > 0 {
		headers["X-Upyun-Multi-Length"] = strconv.FormatInt(config.ContentLength, 10)
	}
	headers["X-Upyun-Multi-Stage"] = "initiate"
	if !config.OrderUpload {
		headers["X-Upyun-Multi-Disorder"] = "true"
	}
	headers["X-Upyun-Multi-Part-Size"] = strconv.FormatInt(partSize, 10)

	resp, err := up.doRESTRequest(&restReqConfig{
		method:    "PUT",
		uri:       config.Path,
		headers:   headers,
		closeBody: true,
	})
	if err != nil {
		return nil, errorOperation("init multipart", err)
	}
	return &InitMultipartUploadResult{
		UploadID: resp.Header.Get("X-Upyun-Multi-Uuid"),
		Path:     config.Path,
		PartSize: partSize,
	}, nil
}
// UploadPart sends one part of a multipart upload session ("upload"
// stage), identified by the session's upload ID and the part's ID.
func (up *UpYun) UploadPart(initResult *InitMultipartUploadResult, part *UploadPartConfig) error {
	hdrs := map[string]string{
		"X-Upyun-Multi-Stage": "upload",
		"X-Upyun-Multi-Uuid":  initResult.UploadID,
		"X-Upyun-Part-Id":     strconv.FormatInt(int64(part.PartID), 10),
		"Content-Length":      strconv.FormatInt(part.PartSize, 10),
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:    "PUT",
		uri:       initResult.Path,
		headers:   hdrs,
		closeBody: true,
		useMD5:    false,
		httpBody:  part.Reader,
	}); err != nil {
		return errorOperation("upload multipart", err)
	}
	return nil
}
// CompleteMultipartUpload finishes a multipart upload session
// ("complete" stage). An optional whole-object MD5 can be supplied via
// config for server-side verification; config may be nil.
func (up *UpYun) CompleteMultipartUpload(initResult *InitMultipartUploadResult, config *CompleteMultipartUploadConfig) error {
	hdrs := map[string]string{
		"X-Upyun-Multi-Stage": "complete",
		"X-Upyun-Multi-Uuid":  initResult.UploadID,
	}
	if config != nil && config.Md5 != "" {
		hdrs["X-Upyun-Multi-Md5"] = config.Md5
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:  "PUT",
		uri:     initResult.Path,
		headers: hdrs,
	}); err != nil {
		return errorOperation("complete multipart", err)
	}
	return nil
}
// ListMultipartUploads lists the bucket's in-progress multipart upload
// sessions, optionally filtered by Prefix (sent base64-encoded) and
// capped by Limit, and decodes the JSON response.
//
// Bug fix: the response body was read but never closed (the request is
// made with closeBody: false), leaking the underlying connection; it is
// now closed via defer once the request succeeds.
func (up *UpYun) ListMultipartUploads(config *ListMultipartConfig) (*ListMultipartUploadResult, error) {
	headers := make(map[string]string)
	headers["X-Upyun-List-Type"] = "multi"
	if config.Prefix != "" {
		headers["X-Upyun-List-Prefix"] = base64.StdEncoding.EncodeToString([]byte(config.Prefix))
	}
	if config.Limit > 0 {
		headers["X-Upyun-List-Limit"] = strconv.FormatInt(config.Limit, 10)
	}
	res, err := up.doRESTRequest(&restReqConfig{
		method:    "GET",
		headers:   headers,
		uri:       "/",
		closeBody: false,
		useMD5:    false,
	})
	if err != nil {
		return nil, errorOperation("list multipart", err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, errorOperation("list multipart read body", err)
	}
	result := &ListMultipartUploadResult{}
	if err = json.Unmarshal(body, result); err != nil {
		return nil, errorOperation("list multipart read body", err)
	}
	return result, nil
}
// ListMultipartParts lists the parts already uploaded in a multipart
// session, starting from BeginID when it is positive, and decodes the
// JSON response.
//
// Bug fixes: the response body was read but never closed (closeBody:
// false), leaking the underlying connection — it is now closed via
// defer; the misspelled parameter name "intiResult" is corrected.
func (up *UpYun) ListMultipartParts(initResult *InitMultipartUploadResult, config *ListMultipartPartsConfig) (*ListUploadedPartsResult, error) {
	headers := make(map[string]string)
	headers["X-Upyun-Multi-Uuid"] = initResult.UploadID
	if config.BeginID > 0 {
		headers["X-Upyun-Part-Id"] = fmt.Sprint(config.BeginID)
	}
	res, err := up.doRESTRequest(&restReqConfig{
		method:    "GET",
		headers:   headers,
		uri:       initResult.Path,
		closeBody: false,
		useMD5:    false,
	})
	if err != nil {
		return nil, errorOperation("list multipart parts", err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, errorOperation("list multipart parts read body", err)
	}
	result := &ListUploadedPartsResult{}
	if err = json.Unmarshal(body, result); err != nil {
		return nil, errorOperation("list multipart parts read body", err)
	}
	return result, nil
}
// Delete removes an object. Async requests asynchronous (queued)
// deletion; Folder marks the target as a directory.
func (up *UpYun) Delete(config *DeleteObjectConfig) error {
	hdrs := make(map[string]string, 2)
	if config.Async {
		hdrs["x-upyun-async"] = "true"
	}
	if config.Folder {
		hdrs["x-upyun-folder"] = "true"
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:    "DELETE",
		uri:       config.Path,
		headers:   hdrs,
		closeBody: true,
	}); err != nil {
		return errorOperation("delete", err)
	}
	return nil
}
// GetInfo issues a HEAD request for path and builds a FileInfo from the
// response headers; the returned Name is the requested path.
func (up *UpYun) GetInfo(path string) (*FileInfo, error) {
	resp, err := up.doRESTRequest(&restReqConfig{
		method:    "HEAD",
		uri:       path,
		closeBody: true,
	})
	if err != nil {
		return nil, errorOperation("get info", err)
	}
	info := parseHeaderToFileInfo(resp.Header, true)
	info.Name = path
	return info, nil
}
// List walks the tree rooted at config.Path and streams every entry
// into config.ObjectsChan, paginating with the X-List-Iter header and
// recursing into subdirectories up to MaxListLevel deep (-1 appears to
// mean unlimited, given the check below). The channel is closed when
// the top-level call (level 0) returns. Listing stops early when
// QuitChan is signalled or MaxListObjects entries have been emitted.
func (up *UpYun) List(config *GetObjectsConfig) error {
	if config.ObjectsChan == nil {
		return errors.New("ObjectsChan is nil")
	}
	if config.Headers == nil {
		config.Headers = make(map[string]string)
	}
	if config.QuitChan == nil {
		config.QuitChan = make(chan bool)
	}
	// Default page size ("50 is nice value" per the original author).
	if _, exist := config.Headers["X-List-Limit"]; !exist {
		config.Headers["X-List-Limit"] = "50"
	}
	if config.DescOrder {
		config.Headers["X-List-Order"] = "desc"
	}
	config.Headers["X-UpYun-Folder"] = "true"
	config.Headers["Accept"] = "application/json"
	// Only the 1st (top) level owns the channel and closes it on return;
	// recursive calls below share the same channel.
	if config.level == 0 {
		defer close(config.ObjectsChan)
	}
	for {
		resp, err := up.doRESTRequest(&restReqConfig{
			method:  "GET",
			uri:     config.Path,
			headers: config.Headers,
		})
		if err != nil {
			// Retry transient network errors; MaxListTries == 0 means
			// retry without bound.
			var nerr net.Error
			if ok := errors.As(err, &nerr); ok {
				config.try++
				if config.MaxListTries == 0 || config.try < config.MaxListTries {
					time.Sleep(10 * time.Millisecond)
					continue
				}
			}
			return errorOperation("list", err)
		}
		b, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return errorOperation("list read body", err)
		}
		iter, files, err := parseBodyToFileInfos(b)
		if err != nil {
			return errorOperation("list read body", err)
		}
		for _, fInfo := range files {
			// Recurse into subdirectories while within the depth limit.
			// The child shares the channels and carries the running
			// try/objNum counters forward.
			if fInfo.IsDir && (config.level+1 < config.MaxListLevel || config.MaxListLevel == -1) {
				rConfig := &GetObjectsConfig{
					Path:           path.Join(config.Path, fInfo.Name),
					QuitChan:       config.QuitChan,
					ObjectsChan:    config.ObjectsChan,
					MaxListTries:   config.MaxListTries,
					MaxListObjects: config.MaxListObjects,
					DescOrder:      config.DescOrder,
					MaxListLevel:   config.MaxListLevel,
					level:          config.level + 1,
					rootDir:        path.Join(config.rootDir, fInfo.Name),
					try:            config.try,
					objNum:         config.objNum,
				}
				if err = up.List(rConfig); err != nil {
					return err
				}
				// The recursion emitted nothing, so this is an empty folder.
				if config.objNum == rConfig.objNum {
					fInfo.IsEmptyDir = true
				}
				config.try, config.objNum = rConfig.try, rConfig.objNum
			}
			// Report names relative to the original listing root.
			if config.rootDir != "" {
				fInfo.Name = path.Join(config.rootDir, fInfo.Name)
			}
			// Stop silently if the consumer signalled quit; otherwise
			// deliver the entry (this send blocks until consumed).
			select {
			case <-config.QuitChan:
				return nil
			default:
				config.ObjectsChan <- fInfo
			}
			config.objNum++
			if config.MaxListObjects > 0 && config.objNum >= config.MaxListObjects {
				return nil
			}
		}
		// NOTE(review): this fixed token appears to be the server's
		// end-of-list iterator sentinel — confirm against UpYun API docs.
		if iter == "g2gCZAAEbmV4dGQAA2VvZg" {
			return nil
		}
		config.Headers["X-List-Iter"] = iter
	}
}
// ModifyMetadata updates an object's metadata via PATCH with the
// operation carried in the query string. An empty Operation defaults to
// "merge" (note: the config is mutated in place in that case).
func (up *UpYun) ModifyMetadata(config *ModifyMetadataConfig) error {
	if config.Operation == "" {
		config.Operation = "merge"
	}
	if _, err := up.doRESTRequest(&restReqConfig{
		method:    "PATCH",
		uri:       config.Path,
		query:     "metadata=" + config.Operation,
		headers:   config.Headers,
		closeBody: true,
	}); err != nil {
		return errorOperation("modify metadata", err)
	}
	return nil
}
// doRESTRequest builds and executes one REST API call: it escapes and
// prefixes the URI with the bucket, assembles the Date/Host/MD5 headers,
// signs the request (legacy or unified auth depending on up.deprecated),
// sends it, and optionally drains and closes the response body.
func (up *UpYun) doRESTRequest(config *restReqConfig) (*http.Response, error) {
	escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
	// path.Join strips a trailing slash; restore it so directory URIs
	// keep their form.
	if strings.HasSuffix(config.uri, "/") {
		escUri += "/"
	}
	if config.query != "" {
		escUri += "?" + config.query
	}
	// Copy the caller's headers, noting whether a Content-MD5 was
	// already provided so we don't compute one again below.
	headers := map[string]string{}
	hasMD5 := false
	for k, v := range config.headers {
		if strings.ToLower(k) == "content-md5" && v != "" {
			hasMD5 = true
		}
		headers[k] = v
	}
	headers["Date"] = makeRFC1123Date(time.Now())
	headers["Host"] = "v0.api.upyun.com"
	// Compute Content-MD5 on demand for body types we can hash.
	if !hasMD5 && config.useMD5 {
		switch v := config.httpBody.(type) {
		case *os.File:
			headers["Content-MD5"], _ = md5File(v)
		case UpYunPutReader:
			headers["Content-MD5"] = v.MD5()
		}
	}
	if up.deprecated {
		// Legacy auth signs over the Content-Length, so one must be
		// present; derive it from the body when the caller omitted it.
		if _, ok := headers["Content-Length"]; !ok {
			size := int64(0)
			switch v := config.httpBody.(type) {
			case *os.File:
				if fInfo, err := v.Stat(); err == nil {
					size = fInfo.Size()
				}
			case UpYunPutReader:
				size = int64(v.Len())
			}
			headers["Content-Length"] = fmt.Sprint(size)
		}
		headers["Authorization"] = up.MakeRESTAuth(&RESTAuthConfig{
			Method:    config.method,
			Uri:       escUri,
			DateStr:   headers["Date"],
			LengthStr: headers["Content-Length"],
		})
	} else {
		// Unified auth signs over the Content-MD5 instead.
		headers["Authorization"] = up.MakeUnifiedAuth(&UnifiedAuthConfig{
			Method:     config.method,
			Uri:        escUri,
			DateStr:    headers["Date"],
			ContentMD5: headers["Content-MD5"],
		})
	}
	endpoint := up.doGetEndpoint("v0.api.upyun.com")
	url := fmt.Sprintf("http://%s%s", endpoint, escUri)
	resp, err := up.doHTTPRequest(config.method, url, headers, config.httpBody)
	if err != nil {
		return nil, err
	}
	// Drain and close when the caller doesn't need the body, so the
	// transport can reuse the connection.
	if config.closeBody {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
	return resp, nil
}

@ -1,61 +0,0 @@
package upyun
import (
"net"
"net/http"
"time"
)
const (
	// version is the SDK release string embedded in the default User-Agent.
	version = "3.0.1"
	// defaultChunkSize is 32 KiB; presumably the transfer chunk size —
	// its use site is not visible in this file.
	defaultChunkSize = 32 * 1024
	// defaultConnectTimeout bounds how long the default transport waits
	// to establish a TCP connection (see NewUpYun).
	defaultConnectTimeout = time.Second * 60
)
// UpYunConfig holds the credentials and options used by NewUpYun to
// build a client.
type UpYunConfig struct {
	Bucket   string // storage bucket (service) name
	Operator string // operator account used for authentication
	Password string // operator password; MD5-hashed by NewUpYun before storing
	Secret   string // deprecated
	// Hosts optionally overrides endpoints; NOTE(review): key/value
	// semantics are resolved by doGetEndpoint, not visible here — confirm.
	Hosts     map[string]string
	UserAgent string // custom User-Agent; when empty a default is derived from version
}
// UpYun is the REST API client. It embeds its UpYunConfig (with
// Password already MD5-hashed by NewUpYun).
type UpYun struct {
	UpYunConfig
	httpc      *http.Client // HTTP client; replaceable via SetHTTPClient
	deprecated bool         // when true, requests are signed with the legacy REST auth (see UseDeprecatedApi)
}
// NewUpYun builds a client from config. The password is stored as its
// MD5 hash, the User-Agent falls back to the SDK default when unset,
// and the HTTP client is configured with a connection timeout of
// defaultConnectTimeout.
//
// Fix: http.Transport.Dial is deprecated; DialContext is the supported
// hook and lets request-context cancellation propagate into dialing.
func NewUpYun(config *UpYunConfig) *UpYun {
	up := &UpYun{}
	up.Bucket = config.Bucket
	up.Operator = config.Operator
	up.Password = md5Str(config.Password)
	up.Secret = config.Secret
	up.Hosts = config.Hosts
	if config.UserAgent != "" {
		up.UserAgent = config.UserAgent
	} else {
		up.UserAgent = makeUserAgent(version)
	}
	up.httpc = &http.Client{
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout: defaultConnectTimeout,
			}).DialContext,
		},
	}
	return up
}
// SetHTTPClient replaces the HTTP client used for all subsequent requests.
func (up *UpYun) SetHTTPClient(httpc *http.Client) {
	up.httpc = httpc
}
// UseDeprecatedApi switches request signing to the legacy REST auth
// scheme (MakeRESTAuth) instead of the default unified auth.
func (up *UpYun) UseDeprecatedApi() {
	up.deprecated = true
}

@ -1,234 +0,0 @@
package upyun
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
var (
	// escape is a 256-bit bitmap (8×32 bits) indexed by byte value: a set
	// bit means escapeUri must percent-encode that byte. ASCII letters,
	// digits and a handful of punctuation characters are passed through.
	escape []uint32 = []uint32{
		0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
		/* ?>=< ;:98 7654 3210  /.-, +*)( '&%$ #"!  */
		0xfc001fff, /* 1111 1100 0000 0000  0001 1111 1111 1111 */
		/* _^]\ [ZYX WVUT SRQP  ONML KJIH GFED CBA@ */
		0x78000001, /* 0111 1000 0000 0000  0000 0000 0000 0001 */
		/* ~}| {zyx wvut srqp  onml kjih gfed cba` */
		0xb8000001, /* 1011 1000 0000 0000  0000 0000 0000 0001 */
		0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
		0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
		0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
		0xffffffff, /* 1111 1111 1111 1111  1111 1111 1111 1111 */
	}
	// hexMap maps a nibble (0-15) to its uppercase hexadecimal digit.
	hexMap = "0123456789ABCDEF"
)
func makeRFC1123Date(d time.Time) string {
utc := d.UTC().Format(time.RFC1123)
return strings.ReplaceAll(utc, "UTC", "GMT")
}
// makeUserAgent builds the default User-Agent string for the given SDK
// version.
func makeUserAgent(version string) string {
	return fmt.Sprint("UPYUN Go SDK V2/", version)
}
// md5Str returns the lowercase hex MD5 digest of s.
func md5Str(s string) string {
	digest := md5.Sum([]byte(s))
	return fmt.Sprintf("%x", digest)
}
// base64ToStr encodes b using standard (padded) base64.
func base64ToStr(b []byte) string {
	enc := base64.StdEncoding
	return enc.EncodeToString(b)
}
// hmacSha1 returns the raw HMAC-SHA1 of data under key.
func hmacSha1(key string, data []byte) []byte {
	mac := hmac.New(sha1.New, []byte(key))
	mac.Write(data)
	return mac.Sum(nil)
}
// ishex reports whether c is an ASCII hexadecimal digit.
func ishex(c byte) bool {
	return ('0' <= c && c <= '9') ||
		('a' <= c && c <= 'f') ||
		('A' <= c && c <= 'F')
}
// unhex converts an ASCII hex digit to its value (0-15); any other byte
// yields 0.
func unhex(c byte) byte {
	if '0' <= c && c <= '9' {
		return c - '0'
	}
	if 'a' <= c && c <= 'f' {
		return c - 'a' + 10
	}
	if 'A' <= c && c <= 'F' {
		return c - 'A' + 10
	}
	return 0
}
// escapeUri percent-encodes every byte of s whose bit is set in the
// package-level escape bitmap, using uppercase hex digits from hexMap;
// all other bytes pass through unchanged.
func escapeUri(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		if escape[c>>5]&(1<<(c&0x1f)) > 0 {
			b.WriteByte('%')
			b.WriteByte(hexMap[c>>4])
			b.WriteByte(hexMap[c&0xf])
		} else {
			b.WriteByte(c)
		}
	}
	return b.String()
}
// unescapeUri decodes %XX sequences in s. If any escape is malformed
// (truncated or non-hex digits) the original string is returned
// unchanged, matching the previous validate-then-decode behavior.
func unescapeUri(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); {
		if s[i] == '%' {
			if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
				// Malformed escape: hand back the input untouched.
				return s
			}
			b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 3
		} else {
			b.WriteByte(s[i])
			i++
		}
	}
	return b.String()
}
// readHTTPBody reads a whole response body. NOTE(review): declared as a
// package variable rather than calling ioutil.ReadAll directly,
// presumably so tests can stub it — confirm.
var readHTTPBody = ioutil.ReadAll
// readHTTPBodyToStr reads resp's entire body as a string, always
// closing the body.
func readHTTPBodyToStr(resp *http.Response) (string, error) {
	raw, err := readHTTPBody(resp.Body)
	resp.Body.Close()
	if err != nil {
		return "", errorOperation("read body", err)
	}
	return string(raw), nil
}
// addQueryToUri appends the key/value pairs in kwargs to rawurl's query
// string and returns the rebuilt URL (query keys are emitted in sorted
// order by url.Values.Encode).
//
// Bug fix: the error from url.ParseRequestURI was discarded, so a
// malformed rawurl caused a nil-pointer panic on u.Query(); malformed
// input is now returned unchanged.
func addQueryToUri(rawurl string, kwargs map[string]string) string {
	u, err := url.ParseRequestURI(rawurl)
	if err != nil {
		return rawurl
	}
	q := u.Query()
	for k, v := range kwargs {
		q.Add(k, v)
	}
	u.RawQuery = q.Encode()
	return u.String()
}
// encodeQueryToPayload URL-encodes kwargs as an
// application/x-www-form-urlencoded payload (keys sorted by Encode).
func encodeQueryToPayload(kwargs map[string]string) string {
	vals := make(url.Values, len(kwargs))
	for key, value := range kwargs {
		vals.Set(key, value)
	}
	return vals.Encode()
}
// readHTTPBodyToInt reads resp's entire body, closes it, and parses the
// content as a base-10 int64.
func readHTTPBodyToInt(resp *http.Response) (int64, error) {
	raw, err := readHTTPBody(resp.Body)
	resp.Body.Close()
	if err != nil {
		return 0, errorOperation("read body", err)
	}
	value, err := strconv.ParseInt(string(raw), 10, 64)
	if err != nil {
		return 0, errorOperation("parse int", err)
	}
	return value, nil
}
// parseStrToInt converts s to an int64, deliberately ignoring parse
// errors (invalid input yields ParseInt's error-case value, 0 for
// syntax errors).
func parseStrToInt(s string) int64 {
	value, _ := strconv.ParseInt(s, 10, 64)
	return value
}
func md5File(f io.ReadSeeker) (string, error) {
offset, _ := f.Seek(0, 0)
defer f.Seek(offset, 0)
hash := md5.New()
if _, err := io.Copy(hash, f); err != nil {
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
// JsonFileInfo is one entry of the JSON listing payload returned by the
// REST API ("type", "name", "length", "last_modified").
type JsonFileInfo struct {
	ContentType  string `json:"type"`
	Name         string `json:"name"`
	Length       int64  `json:"length"`
	LastModified int64  `json:"last_modified"`
}

// JsonFiles is the top-level JSON listing payload: the entries plus the
// pagination iterator token consumed by List via X-List-Iter.
type JsonFiles struct {
	Files []*JsonFileInfo `json:"files"`
	Iter  string          `json:"iter"`
}
// parseBodyToFileInfos decodes a JSON listing body into FileInfo values
// and returns the API's pagination iterator. Entries whose content type
// is "folder" are flagged as directories; LastModified is interpreted
// as a Unix timestamp in seconds.
func parseBodyToFileInfos(b []byte) (iter string, fInfos []*FileInfo, err error) {
	var listing JsonFiles
	if err = json.Unmarshal(b, &listing); err != nil {
		return
	}
	iter = listing.Iter
	fInfos = make([]*FileInfo, 0, len(listing.Files))
	for _, entry := range listing.Files {
		fInfos = append(fInfos, &FileInfo{
			Name:        entry.Name,
			IsDir:       entry.ContentType == "folder",
			ContentType: entry.ContentType,
			Size:        entry.Length,
			Time:        time.Unix(entry.LastModified, 0),
		})
	}
	return
}

17
vendor/modules.txt vendored

@ -90,9 +90,6 @@ github.com/golang/snappy
# github.com/google/go-querystring v1.1.0
## explicit; go 1.10
github.com/google/go-querystring/query
# github.com/huaweicloud/huaweicloud-sdk-go-obs v3.22.11+incompatible
## explicit
github.com/huaweicloud/huaweicloud-sdk-go-obs/obs
# github.com/jackc/pgpassfile v1.0.0
## explicit; go 1.12
github.com/jackc/pgpassfile
@ -113,9 +110,6 @@ github.com/jackc/pgx/v5/pgconn/internal/ctxwatch
github.com/jackc/pgx/v5/pgproto3
github.com/jackc/pgx/v5/pgtype
github.com/jackc/pgx/v5/stdlib
# github.com/jasonlvhit/gocron v0.0.1
## explicit; go 1.13
github.com/jasonlvhit/gocron
# github.com/jinzhu/inflection v1.0.0
## explicit
github.com/jinzhu/inflection
@ -168,18 +162,12 @@ github.com/mvdan/xurls
# github.com/natefinch/lumberjack v2.0.0+incompatible
## explicit
github.com/natefinch/lumberjack
# github.com/nilorg/sdk v0.0.0-20221104025912-4b6ccb7004d8
## explicit; go 1.12
github.com/nilorg/sdk/convert
# github.com/oschwald/geoip2-golang v1.8.0
## explicit; go 1.18
github.com/oschwald/geoip2-golang
# github.com/oschwald/maxminddb-golang v1.10.0
## explicit; go 1.18
github.com/oschwald/maxminddb-golang
# github.com/patrickmn/go-cache v2.1.0+incompatible
## explicit
github.com/patrickmn/go-cache
# github.com/pelletier/go-toml/v2 v2.0.6
## explicit; go 1.16
github.com/pelletier/go-toml/v2
@ -240,7 +228,7 @@ github.com/syndtr/goleveldb/leveldb/opt
github.com/syndtr/goleveldb/leveldb/storage
github.com/syndtr/goleveldb/leveldb/table
github.com/syndtr/goleveldb/leveldb/util
# github.com/tencentyun/cos-go-sdk-v5 v0.7.40
# github.com/tencentyun/cos-go-sdk-v5 v0.7.41
## explicit; go 1.12
github.com/tencentyun/cos-go-sdk-v5
# github.com/tidwall/pretty v1.2.0
@ -259,9 +247,6 @@ github.com/ugorji/go/codec
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
# github.com/upyun/go-sdk/v3 v3.0.3
## explicit; go 1.13
github.com/upyun/go-sdk/v3/upyun
# github.com/xdg-go/pbkdf2 v1.0.0
## explicit; go 1.9
github.com/xdg-go/pbkdf2

Loading…
Cancel
Save