diff --git a/backend/init/init.go b/backend/init/init.go
index 8fcd249c421d..1efa695413f4 100644
--- a/backend/init/init.go
+++ b/backend/init/init.go
@@ -20,6 +20,7 @@ import (
backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs"
backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
backendManta "github.com/hashicorp/terraform/backend/remote-state/manta"
+ backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss"
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
)
@@ -68,6 +69,7 @@ func Init(services *disco.Disco) {
// Deprecated backends.
"azure": deprecateBackend(backendAzure.New(),
`Warning: "azure" name is deprecated, please use "azurerm"`),
+ "oss": func() backend.Backend { return backendOSS.New() },
}
// Add the legacy remote backends that haven't yet been converted to
diff --git a/backend/remote-state/oss/backend.go b/backend/remote-state/oss/backend.go
new file mode 100644
index 000000000000..a7cf61addedd
--- /dev/null
+++ b/backend/remote-state/oss/backend.go
@@ -0,0 +1,252 @@
+package oss
+
+import (
+ "context"
+ "fmt"
+ "github.com/aliyun/aliyun-oss-go-sdk/oss"
+ "github.com/denverdino/aliyungo/common"
+ "github.com/denverdino/aliyungo/location"
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+ "os"
+ "strings"
+
+ "log"
+ "time"
+)
+
+// New creates a new backend for OSS remote state.
+func New() backend.Backend {
+ s := &schema.Backend{
+ Schema: map[string]*schema.Schema{
+ "access_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Alibaba Cloud Access Key ID",
+ DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")),
+ },
+
+ "secret_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Alibaba Cloud Access Secret Key",
+ DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")),
+ },
+
+ "security_token": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Alibaba Cloud Security Token",
+ DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", os.Getenv("SECURITY_TOKEN")),
+ },
+
+ "region": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "The region of the OSS bucket.",
+ DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")),
+ },
+
+ "bucket": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The name of the OSS bucket",
+ },
+
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The path relative to your object storage directory where the state file will be stored.",
+ },
+
+			"name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The name of the state file inside the bucket",
+				ValidateFunc: func(v interface{}, s string) ([]string, []error) {
+					if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") {
+						return nil, []error{fmt.Errorf("name cannot start or end with '/'")}
+					}
+					return nil, nil
+				},
+				Default: "terraform.tfstate",
+			},
+
+ "lock": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether to lock state access. Defaults to true",
+ Default: true,
+ },
+
+ "encrypt": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether to enable server side encryption of the state file",
+ Default: false,
+ },
+
+			"acl": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Object ACL to be applied to the state file",
+				Default:     "",
+				ValidateFunc: func(v interface{}, k string) ([]string, []error) {
+					if value := v.(string); value != "" {
+						acls := oss.ACLType(value)
+						if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite {
+							return nil, []error{fmt.Errorf(
+								"%q must be a valid ACL value, expected %s, %s or %s, got %q",
+								k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)}
+						}
+					}
+					return nil, nil
+				},
+			},
+ },
+ }
+
+ result := &Backend{Backend: s}
+ result.Backend.ConfigureFunc = result.configure
+ return result
+}
+
+type Backend struct {
+ *schema.Backend
+
+ // The fields below are set from configure
+ ossClient *oss.Client
+
+ bucketName string
+ statePath string
+ stateName string
+ serverSideEncryption bool
+ acl string
+ security_token string
+ endpoint string
+ lock bool
+}
+
+// configure reads the validated backend config and builds the OSS client.
+func (b *Backend) configure(ctx context.Context) error {
+	if b.ossClient != nil {
+		return nil
+	}
+
+	// Grab the resource data
+	d := schema.FromContextBackendConfig(ctx)
+
+	b.bucketName = d.Get("bucket").(string)
+	// Normalize the path: strip surrounding slashes and a leading "./".
+	dir := strings.Trim(d.Get("path").(string), "/")
+	dir = strings.TrimPrefix(dir, "./")
+
+	b.statePath = dir
+	b.stateName = d.Get("name").(string)
+	b.serverSideEncryption = d.Get("encrypt").(bool)
+	b.acl = d.Get("acl").(string)
+	b.lock = d.Get("lock").(bool)
+
+	access_key := d.Get("access_key").(string)
+	secret_key := d.Get("secret_key").(string)
+	security_token := d.Get("security_token").(string)
+	endpoint := os.Getenv("OSS_ENDPOINT")
+	if endpoint == "" {
+		region := common.Region(d.Get("region").(string))
+		// Use the discovered endpoint; fall back to the conventional one on error.
+		if end, err := b.getOSSEndpointByRegion(access_key, secret_key, security_token, region); end != "" {
+			endpoint = end
+		} else {
+			log.Printf("[DEBUG] Describe OSS endpoint got an error: %#v", err)
+			endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", string(region))
+		}
+	}
+
+	log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint)
+	var options []oss.ClientOption
+	if security_token != "" {
+		options = append(options, oss.SecurityToken(security_token))
+	}
+	options = append(options, oss.UserAgent(fmt.Sprintf("HashiCorp-Terraform-v%s", strings.TrimSuffix(terraform.VersionString(), "-dev"))))
+
+	client, err := oss.New(fmt.Sprintf("http://%s", endpoint), access_key, secret_key, options...)
+	if err != nil {
+		return err
+	}
+	b.ossClient = client
+
+	return nil
+}
+
+// getOSSEndpointByRegion discovers the OSS endpoint for a region via the
+// Alibaba Cloud location service, retrying transient failures via Invoker.
+func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token string, region common.Region) (string, error) {
+	endpointClient := location.NewClient(access_key, secret_key)
+	endpointClient.SetSecurityToken(security_token)
+	var endpointResp *location.DescribeEndpointsResponse
+	invoker := NewInvoker()
+	if err := invoker.Run(func() error {
+		resp, err := endpointClient.DescribeEndpoints(&location.DescribeEndpointsArgs{
+			Id:          region,
+			ServiceCode: "oss",
+			Type:        "openAPI",
+		})
+		endpointResp = resp
+		return err
+	}); err != nil {
+		return "", fmt.Errorf("Describe endpoint using region: %#v got an error: %#v.", region, err)
+	}
+	endpointItem := endpointResp.Endpoints.Endpoint
+	endpoint := ""
+	// len() on a nil slice is 0, so no separate nil check is needed.
+	if len(endpointItem) > 0 {
+		endpoint = endpointItem[0].Endpoint
+	}
+	return endpoint, nil
+}
+
+type Invoker struct {
+ catchers []*Catcher
+}
+
+type Catcher struct {
+ Reason string
+ RetryCount int
+ RetryWaitSeconds int
+}
+
+var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3}
+var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3}
+
+func NewInvoker() Invoker {
+ i := Invoker{}
+ i.AddCatcher(ClientErrorCatcher)
+ i.AddCatcher(ServiceBusyCatcher)
+ return i
+}
+
+func (a *Invoker) AddCatcher(catcher Catcher) {
+ a.catchers = append(a.catchers, &catcher)
+}
+
+func (a *Invoker) Run(f func() error) error {
+ err := f()
+
+ if err == nil {
+ return nil
+ }
+
+ for _, catcher := range a.catchers {
+ if strings.Contains(err.Error(), catcher.Reason) {
+ catcher.RetryCount--
+
+ if catcher.RetryCount <= 0 {
+ return fmt.Errorf("Retry timeout and got an error: %#v.", err)
+ } else {
+ time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second)
+ return a.Run(f)
+ }
+ }
+ }
+ return err
+}
diff --git a/backend/remote-state/oss/backend_state.go b/backend/remote-state/oss/backend_state.go
new file mode 100644
index 000000000000..eb5a8a7d1b88
--- /dev/null
+++ b/backend/remote-state/oss/backend_state.go
@@ -0,0 +1,193 @@
+package oss
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/aliyun/aliyun-oss-go-sdk/oss"
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/state"
+ "github.com/hashicorp/terraform/state/remote"
+ "github.com/hashicorp/terraform/terraform"
+ "log"
+ "path"
+)
+
+const (
+ lockFileSuffix = ".tflock"
+)
+
+// get a remote client configured for this state
+func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
+ if name == "" {
+ return nil, errors.New("missing state name")
+ }
+
+ client := &RemoteClient{
+ ossClient: b.ossClient,
+ bucketName: b.bucketName,
+ stateFile: b.stateFile(name),
+ lockFile: b.lockFile(name),
+ serverSideEncryption: b.serverSideEncryption,
+ acl: b.acl,
+ doLock: b.lock,
+ }
+
+ return client, nil
+}
+
+func (b *Backend) State(name string) (state.State, error) {
+ client, err := b.remoteClient(name)
+ if err != nil {
+ return nil, err
+ }
+ var stateMgr state.State = &remote.State{Client: client}
+
+ if !b.lock {
+ stateMgr = &state.LockDisabled{Inner: stateMgr}
+ }
+ // Check to see if this state already exists.
+ existing, err := b.States()
+ if err != nil {
+ return nil, err
+ }
+
+ log.Printf("[DEBUG] Current state name: %s. All States:%#v", name, existing)
+
+ exists := false
+ for _, s := range existing {
+ if s == name {
+ exists = true
+ break
+ }
+ }
+ // We need to create the object so it's listed by States.
+ if !exists {
+ // take a lock on this state while we write it
+ lockInfo := state.NewLockInfo()
+ lockInfo.Operation = "init"
+ lockId, err := client.Lock(lockInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to lock OSS state: %s", err)
+ }
+
+ // Local helper function so we can call it multiple places
+ lockUnlock := func(e error) error {
+ if err := stateMgr.Unlock(lockId); err != nil {
+ return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err)
+ }
+ return e
+ }
+
+ // Grab the value
+ if err := stateMgr.RefreshState(); err != nil {
+ err = lockUnlock(err)
+ return nil, err
+ }
+
+ // If we have no state, we have to create an empty state
+ if v := stateMgr.State(); v == nil {
+ if err := stateMgr.WriteState(terraform.NewState()); err != nil {
+ err = lockUnlock(err)
+ return nil, err
+ }
+ if err := stateMgr.PersistState(); err != nil {
+ err = lockUnlock(err)
+ return nil, err
+ }
+ }
+
+ // Unlock, the state should now be initialized
+ if err := lockUnlock(nil); err != nil {
+ return nil, err
+ }
+
+ }
+ return stateMgr, nil
+}
+
+func (b *Backend) States() ([]string, error) {
+	bucket, err := b.ossClient.Bucket(b.bucketName)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting bucket: %#v", err)
+	}
+
+	var options []oss.Option
+	options = append(options, oss.Prefix(b.statePath))
+	resp, err := bucket.ListObjects(options...)
+
+	if err != nil {
+		return nil, err
+	}
+
+	result := []string{backend.DefaultStateName}
+	for _, obj := range resp.Objects {
+		// Call keyEnv once per object and reuse the result.
+		if name := b.keyEnv(obj.Key); name != "" {
+			result = append(result, name)
+		}
+	}
+
+	sort.Strings(result[1:])
+	return result, nil
+}
+
+func (b *Backend) DeleteState(name string) error {
+ if name == backend.DefaultStateName || name == "" {
+ return fmt.Errorf("can't delete default state")
+ }
+
+ client, err := b.remoteClient(name)
+ if err != nil {
+ return err
+ }
+ return client.Delete()
+}
+
+// extract the object name from the OSS key
+func (b *Backend) keyEnv(key string) string {
+ // we have 3 parts, the state path, the state name, and the state file
+ parts := strings.Split(key, "/")
+ length := len(parts)
+ if length < 3 {
+ // use default state
+ return ""
+ }
+
+ // shouldn't happen since we listed by prefix
+ if strings.Join(parts[0:length-2], "/") != b.statePath {
+ return ""
+ }
+
+ // not our key, so don't include it in our listing
+ if parts[length-1] != b.stateName {
+ return ""
+ }
+
+ return parts[length-2]
+}
+
+func (b *Backend) stateFile(name string) string {
+ if name == backend.DefaultStateName {
+ return path.Join(b.statePath, b.stateName)
+ }
+ return path.Join(b.statePath, name, b.stateName)
+}
+
+func (b *Backend) lockFile(name string) string {
+ if name == backend.DefaultStateName {
+ return path.Join(b.statePath, b.stateName+lockFileSuffix)
+ }
+ return path.Join(b.statePath, name, b.stateName+lockFileSuffix)
+}
+
+const stateUnlockError = `
+Error unlocking Alibaba Cloud OSS state file:
+
+Lock ID: %s
+Error message: %#v
+
+You may have to force-unlock this state in order to use it again.
+The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created.
+`
diff --git a/backend/remote-state/oss/backend_test.go b/backend/remote-state/oss/backend_test.go
new file mode 100644
index 000000000000..5938c44db12e
--- /dev/null
+++ b/backend/remote-state/oss/backend_test.go
@@ -0,0 +1,141 @@
+package oss
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/aliyun/aliyun-oss-go-sdk/oss"
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+ "strings"
+)
+
+// verify that we are doing ACC tests or the OSS tests specifically
+func testACC(t *testing.T) {
+ skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == ""
+ if skip {
+ t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST")
+ t.Skip()
+ }
+ if os.Getenv("ALICLOUD_REGION") == "" {
+ os.Setenv("ALICLOUD_REGION", "cn-beijing")
+ }
+}
+
+func TestBackend_impl(t *testing.T) {
+ var _ backend.Backend = new(Backend)
+}
+
+func TestBackendConfig(t *testing.T) {
+ testACC(t)
+ config := map[string]interface{}{
+ "region": "cn-beijing",
+ "bucket": "terraform-backend-oss-test",
+ "path": "mystate",
+ "name": "first.tfstate",
+ }
+
+ b := backend.TestBackendConfig(t, New(), config).(*Backend)
+
+ if !strings.HasPrefix(b.ossClient.Config.Endpoint, "http://oss-cn-beijing") {
+ t.Fatalf("Incorrect region was provided")
+ }
+ if b.bucketName != "terraform-backend-oss-test" {
+ t.Fatalf("Incorrect bucketName was provided")
+ }
+ if b.statePath != "mystate" {
+ t.Fatalf("Incorrect state file path was provided")
+ }
+ if b.stateName != "first.tfstate" {
+ t.Fatalf("Incorrect keyName was provided")
+ }
+
+ if b.ossClient.Config.AccessKeyID == "" {
+ t.Fatalf("No Access Key Id was provided")
+ }
+ if b.ossClient.Config.AccessKeySecret == "" {
+ t.Fatalf("No Secret Access Key was provided")
+ }
+}
+
+func TestBackendConfig_invalidKey(t *testing.T) {
+ testACC(t)
+ cfg := map[string]interface{}{
+ "region": "cn-beijing",
+ "bucket": "terraform-backend-oss-test",
+ "path": "/leading-slash",
+ "name": "/test.tfstate",
+ }
+
+ rawCfg, err := config.NewRawConfig(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resCfg := terraform.NewResourceConfig(rawCfg)
+
+ _, errs := New().Validate(resCfg)
+ if len(errs) != 1 {
+ t.Fatal("expected config validation error")
+ }
+}
+
+func TestBackend(t *testing.T) {
+ testACC(t)
+
+ bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
+ statePath := "multi/level/path/"
+
+ b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+ "bucket": bucketName,
+ "path": statePath,
+ }).(*Backend)
+
+ b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+ "bucket": bucketName,
+ "path": statePath,
+ }).(*Backend)
+
+ createOSSBucket(t, b1.ossClient, bucketName)
+ defer deleteOSSBucket(t, b1.ossClient, bucketName)
+
+ backend.TestBackendStates(t, b1)
+ backend.TestBackendStateLocks(t, b1, b2)
+ backend.TestBackendStateForceUnlock(t, b1, b2)
+}
+
+func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
+ // Be clear about what we're doing in case the user needs to clean this up later.
+ if err := ossClient.CreateBucket(bucketName); err != nil {
+ t.Fatal("failed to create test OSS bucket:", err)
+ }
+}
+
+func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
+ warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alicloud account and may incur storage charges. (error was %s)"
+
+ // first we have to get rid of the env objects, or we can't delete the bucket
+ bucket, err := ossClient.Bucket(bucketName)
+ if err != nil {
+ t.Fatal("Error getting bucket:", err)
+ return
+ }
+ objects, err := bucket.ListObjects()
+ if err != nil {
+ t.Logf(warning, err)
+ return
+ }
+ for _, obj := range objects.Objects {
+ if err := bucket.DeleteObject(obj.Key); err != nil {
+ // this will need cleanup no matter what, so just warn and exit
+ t.Logf(warning, err)
+ return
+ }
+ }
+
+ if err := ossClient.DeleteBucket(bucketName); err != nil {
+ t.Logf(warning, err)
+ }
+}
diff --git a/backend/remote-state/oss/client.go b/backend/remote-state/oss/client.go
new file mode 100644
index 000000000000..50e03f0a01e1
--- /dev/null
+++ b/backend/remote-state/oss/client.go
@@ -0,0 +1,248 @@
+package oss
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aliyun/aliyun-oss-go-sdk/oss"
+ multierror "github.com/hashicorp/go-multierror"
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/terraform/state"
+ "github.com/hashicorp/terraform/state/remote"
+ "log"
+ "sync"
+)
+
+type RemoteClient struct {
+ ossClient *oss.Client
+ bucketName string
+ stateFile string
+ lockFile string
+ serverSideEncryption bool
+ acl string
+ doLock bool
+ info *state.LockInfo
+ mu sync.Mutex
+}
+
+func (c *RemoteClient) Get() (payload *remote.Payload, err error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ buf, err := c.getObj(c.stateFile)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there was no data, then return nil
+ if buf == nil || len(buf.Bytes()) == 0 {
+ log.Printf("[DEBUG] State %s has no data.", c.stateFile)
+ return nil, nil
+ }
+ md5 := md5.Sum(buf.Bytes())
+
+ payload = &remote.Payload{
+ Data: buf.Bytes(),
+ MD5: md5[:],
+ }
+ return payload, nil
+}
+
+func (c *RemoteClient) Put(data []byte) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.putObj(c.stateFile, data)
+}
+
+func (c *RemoteClient) Delete() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.deleteObj(c.stateFile)
+}
+
+func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if !c.doLock {
+ return "", nil
+ }
+
+ bucket, err := c.ossClient.Bucket(c.bucketName)
+ if err != nil {
+ return "", fmt.Errorf("Error getting bucket: %#v", err)
+ }
+
+ infoJson, err := json.Marshal(info)
+ if err != nil {
+ return "", err
+ }
+
+ if info.ID == "" {
+ lockID, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", err
+ }
+ info.ID = lockID
+ }
+
+ info.Path = c.lockFile
+ if exist, err := bucket.IsObjectExist(info.Path); err != nil {
+ return "", fmt.Errorf("Estimating object %s is exist got an error: %#v", info.Path, err)
+ } else if !exist {
+ if err := c.putObj(info.Path, infoJson); err != nil {
+ return "", err
+ }
+ } else if _, err := c.validLock(info.ID); err != nil {
+ return "", err
+ }
+
+ return info.ID, nil
+}
+
+func (c *RemoteClient) Unlock(id string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if !c.doLock {
+ return nil
+ }
+
+ lockInfo, err := c.validLock(id)
+ if err != nil {
+ return err
+ }
+
+ if err := c.deleteObj(c.lockFile); err != nil {
+ return &state.LockError{
+ Info: lockInfo,
+ Err: err,
+ }
+ }
+ return nil
+}
+
+// putObj uploads data to the given OSS key, applying ACL, content-type,
+// optional server-side encryption and an explicit content length.
+func (c *RemoteClient) putObj(key string, data []byte) error {
+	bucket, err := c.ossClient.Bucket(c.bucketName)
+	if err != nil {
+		return fmt.Errorf("Error getting bucket: %#v", err)
+	}
+	body := bytes.NewReader(data)
+
+	var options []oss.Option
+	if c.acl != "" {
+		options = append(options, oss.ACL(oss.ACLType(c.acl)))
+	}
+	options = append(options, oss.ContentType("application/json"))
+	if c.serverSideEncryption {
+		options = append(options, oss.ServerSideEncryption("AES256"))
+	}
+	options = append(options, oss.ContentLength(int64(len(data))))
+
+	// bytes.NewReader never returns nil, so upload unconditionally.
+	if err := bucket.PutObject(key, body, options...); err != nil {
+		return fmt.Errorf("failed to upload %s: %#v", key, err)
+	}
+	return nil
+}
+
+func (c *RemoteClient) getObj(key string) (*bytes.Buffer, error) {
+ bucket, err := c.ossClient.Bucket(c.bucketName)
+ if err != nil {
+ return nil, fmt.Errorf("Error getting bucket: %#v", err)
+ }
+
+ if exist, err := bucket.IsObjectExist(key); err != nil {
+ return nil, fmt.Errorf("Estimating object %s is exist got an error: %#v", key, err)
+ } else if !exist {
+ return nil, nil
+ }
+
+ var options []oss.Option
+ output, err := bucket.GetObject(key, options...)
+ if err != nil {
+ return nil, fmt.Errorf("Error getting object: %#v", err)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ if _, err := io.Copy(buf, output); err != nil {
+ return nil, fmt.Errorf("Failed to read remote state: %s", err)
+ }
+ return buf, nil
+}
+
+func (c *RemoteClient) deleteObj(key string) error {
+ bucket, err := c.ossClient.Bucket(c.bucketName)
+ if err != nil {
+ return fmt.Errorf("Error getting bucket: %#v", err)
+ }
+
+ exist, err := bucket.IsObjectExist(key)
+ if err != nil {
+ return fmt.Errorf("OSS ensure object existing got an error: %#v", err)
+ }
+
+ if !exist {
+ return nil
+ }
+
+ if err := bucket.DeleteObject(key); err != nil {
+ return fmt.Errorf("Error deleting object %s: %#v", key, err)
+ }
+ return nil
+}
+
+func (c *RemoteClient) lockError(err error) *state.LockError {
+ lockErr := &state.LockError{
+ Err: err,
+ }
+
+ info, infoErr := c.lockInfo()
+ if infoErr != nil {
+ lockErr.Err = multierror.Append(lockErr.Err, infoErr)
+ } else {
+ lockErr.Info = info
+ }
+ return lockErr
+}
+
+// lockInfo reads the lock file, parses its contents and returns the parsed
+// LockInfo struct.
+func (c *RemoteClient) lockInfo() (*state.LockInfo, error) {
+ buf, err := c.getObj(c.lockFile)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil || len(buf.Bytes()) == 0 {
+ return nil, nil
+ }
+ info := &state.LockInfo{}
+ if err := json.Unmarshal(buf.Bytes(), info); err != nil {
+ return nil, err
+ }
+
+ return info, nil
+}
+
+// validLock verifies that the stored lock (if any) matches the given id.
+// lockInfo() returns (nil, nil) when no lock file exists, so a nil guard
+// is required before dereferencing lockInfo.ID to avoid a panic.
+func (c *RemoteClient) validLock(id string) (*state.LockInfo, *state.LockError) {
+	lockErr := &state.LockError{}
+	lockInfo, err := c.lockInfo()
+	if err != nil {
+		lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
+		return nil, lockErr
+	}
+	lockErr.Info = lockInfo
+	if lockInfo == nil || lockInfo.ID != id {
+		lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
+		return nil, lockErr
+	}
+	return lockInfo, nil
+}
diff --git a/backend/remote-state/oss/client_test.go b/backend/remote-state/oss/client_test.go
new file mode 100644
index 000000000000..e4bebd83a1f0
--- /dev/null
+++ b/backend/remote-state/oss/client_test.go
@@ -0,0 +1,112 @@
+package oss
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/state"
+ "github.com/hashicorp/terraform/state/remote"
+)
+
+func TestRemoteClient_impl(t *testing.T) {
+ var _ remote.Client = new(RemoteClient)
+ var _ remote.ClientLocker = new(RemoteClient)
+}
+
+func TestRemoteClient(t *testing.T) {
+ testACC(t)
+ bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
+ path := "testState"
+
+ b := backend.TestBackendConfig(t, New(), map[string]interface{}{
+ "bucket": bucketName,
+ "path": path,
+ "encrypt": true,
+ }).(*Backend)
+
+ createOSSBucket(t, b.ossClient, bucketName)
+ defer deleteOSSBucket(t, b.ossClient, bucketName)
+
+ state, err := b.State(backend.DefaultStateName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ remote.TestClient(t, state.(*remote.State).Client)
+}
+
+func TestOSS_stateLock(t *testing.T) {
+ testACC(t)
+ bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
+ path := "testState"
+
+ b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+ "bucket": bucketName,
+ "path": path,
+ "encrypt": true,
+ }).(*Backend)
+
+ b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+ "bucket": bucketName,
+ "path": path,
+ "encrypt": true,
+ }).(*Backend)
+
+ createOSSBucket(t, b1.ossClient, bucketName)
+ defer deleteOSSBucket(t, b1.ossClient, bucketName)
+
+ s1, err := b1.State(backend.DefaultStateName)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ s2, err := b2.State(backend.DefaultStateName)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
+}
+
+// verify that we can unlock a state with an existing lock
+func TestOSS_destroyLock(t *testing.T) {
+	testACC(t)
+	bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
+	path := "testState"
+
+	b := backend.TestBackendConfig(t, New(), map[string]interface{}{
+		"bucket":  bucketName,
+		"path":    path,
+		"encrypt": true,
+	}).(*Backend)
+
+	createOSSBucket(t, b.ossClient, bucketName)
+	defer deleteOSSBucket(t, b.ossClient, bucketName)
+
+	s, err := b.State(backend.DefaultStateName)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	c := s.(*remote.State).Client.(*RemoteClient)
+
+	info := state.NewLockInfo()
+	id, err := c.Lock(info)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if err := c.Unlock(id); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	res, err := c.getObj(c.lockFile)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	// Report the lock file path, not the state file, on failure.
+	if res != nil && res.String() != "" {
+		t.Fatalf("lock key not cleaned up at: %s", c.lockFile)
+	}
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
new file mode 100644
index 000000000000..5f9bd84f28e6
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go
@@ -0,0 +1,97 @@
+package oss
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "hash"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// 用于signHeader的字典排序存放容器。
+type headerSorter struct {
+ Keys []string
+ Vals []string
+}
+
+// 生成签名方法(直接设置请求的Header)。
+func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
+ // Get the final Authorization' string
+ authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
+
+ // Give the parameter "Authorization" value
+ req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
+}
+
+func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
+ // Find out the "x-oss-"'s address in this request'header
+ temp := make(map[string]string)
+
+ for k, v := range req.Header {
+ if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
+ temp[strings.ToLower(k)] = v[0]
+ }
+ }
+ hs := newHeaderSorter(temp)
+
+ // Sort the temp by the Ascending Order
+ hs.Sort()
+
+ // Get the CanonicalizedOSSHeaders
+ canonicalizedOSSHeaders := ""
+ for i := range hs.Keys {
+ canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
+ }
+
+ // Give other parameters values
+ // when sign url, date is expires
+ date := req.Header.Get(HTTPHeaderDate)
+ contentType := req.Header.Get(HTTPHeaderContentType)
+ contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
+
+ signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
+ h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
+ io.WriteString(h, signStr)
+ signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ return signedStr
+}
+
+// Additional function for function SignHeader.
+func newHeaderSorter(m map[string]string) *headerSorter {
+ hs := &headerSorter{
+ Keys: make([]string, 0, len(m)),
+ Vals: make([]string, 0, len(m)),
+ }
+
+ for k, v := range m {
+ hs.Keys = append(hs.Keys, k)
+ hs.Vals = append(hs.Vals, v)
+ }
+ return hs
+}
+
+// Additional function for function SignHeader.
+func (hs *headerSorter) Sort() {
+ sort.Sort(hs)
+}
+
+// Additional function for function SignHeader.
+func (hs *headerSorter) Len() int {
+ return len(hs.Vals)
+}
+
+// Additional function for function SignHeader.
+func (hs *headerSorter) Less(i, j int) bool {
+ return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
+}
+
+// Additional function for function SignHeader.
+func (hs *headerSorter) Swap(i, j int) {
+ hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
+ hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
new file mode 100644
index 000000000000..ca6cb4bc5d0b
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go
@@ -0,0 +1,958 @@
+package oss
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "hash/crc64"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "time"
+)
+
+// Bucket provides the object-level operations (put/get/copy/delete, ...) scoped to one OSS bucket.
+type Bucket struct {
+	Client     Client
+	BucketName string
+}
+
+//
+// PutObject creates a new object, overwriting any existing object with the same key.
+//
+// objectKey  name of the object to upload; UTF-8, 1-1023 bytes, must not begin with "/" or "\".
+// reader     io.Reader supplying the object's data.
+// options    optional object properties: CacheControl, ContentDisposition, ContentEncoding,
+//            Expires, ServerSideEncryption, ObjectACL, Meta; see
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
+	opts := addContentType(options, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// PutObjectFromFile creates a new object whose content is read from a local file.
+//
+// objectKey  name of the object to upload.
+// filePath   local file whose content becomes the object's value.
+// options    optional object properties; see PutObject's options.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	opts := addContentType(options, filePath, objectKey)
+
+	request := &PutObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    fd,
+	}
+	resp, err := bucket.DoPutObject(request, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// DoPutObject uploads the object described by request.
+//
+// request  upload request.
+// options  upload options.
+//
+// Response  upload response; valid when error is nil.
+// error    nil on success, otherwise error details.
+//
+func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
+	isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
+	if !isOptSet {
+		options = addContentType(options, request.ObjectKey)
+	}
+
+	listener := getProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoPutObject")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+//
+// GetObject downloads an object.
+//
+// objectKey  name of the object to download.
+// options    constraints on the object: Range, IfModifiedSince, IfUnmodifiedSince,
+//            IfMatch, IfNoneMatch, AcceptEncoding; see
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser  reader for the data; close it after reading. Valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return nil, err
+	}
+	return result.Response.Body, nil
+}
+
+//
+// GetObjectToFile downloads an object and writes its content to a local file.
+//
+// objectKey  name of the object to download.
+// filePath   local file that receives the object's content.
+// options    constraints on the object; see GetObject's options.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
+	tempFilePath := filePath + TempFileSuffix
+
+	// read the object content
+	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Body.Close()
+
+	// create the file if it does not exist, truncate it if it does
+	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+	if err != nil {
+		return err
+	}
+
+	// write the data to the file
+	_, err = io.Copy(fd, result.Response.Body)
+	fd.Close()
+	if err != nil {
+		return err
+	}
+
+	// verify the CRC checksum
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = checkCRC(result.Response, "GetObjectToFile")
+		if err != nil {
+			os.Remove(tempFilePath)
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+//
+// DoGetObject downloads the object described by request.
+//
+// request  download request.
+// options  constraints on the object; see GetObject's options.
+//
+// GetObjectResult  download response; valid when error is nil.
+// error    nil on success, otherwise error details.
+//
+func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
+	params := map[string]interface{}{}
+	resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	// crc
+	var crcCalc hash.Hash64
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		crcCalc = crc64.New(crcTable())
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	// progress
+	listener := getProgressListener(options)
+
+	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+	resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
+
+	return result, nil
+}
+
+//
+// CopyObject copies an object within the same bucket.
+//
+// srcObjectKey   source object of the copy.
+// destObjectKey  destination object of the copy.
+// options        source-object constraints (CopySourceIfMatch, CopySourceIfNoneMatch,
+//                CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective)
+//                and destination-object properties (CacheControl, ContentDisposition,
+//                ContentEncoding, Expires, ServerSideEncryption, ObjectACL, Meta); see
+//                https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
+//
+// error          nil on success, otherwise error details.
+//
+func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	params := map[string]interface{}{}
+	resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// CopyObjectTo copies an object to another bucket.
+//
+// srcObjectKey    source object name; the source bucket is Bucket.BucketName.
+// destBucketName  destination bucket name.
+// destObjectKey   destination object name.
+// options         copy options; see CopyObject's options.
+//
+// error           nil on success, otherwise error details.
+//
+func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
+	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+//
+// CopyObjectFrom copies an object from another bucket.
+//
+// srcBucketName  source bucket name.
+// srcObjectKey   source object name.
+// destObjectKey  destination object name; the destination bucket is Bucket.BucketName.
+// options        copy options; see CopyObject's options.
+//
+// error          nil on success, otherwise error details.
+//
+func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	destBucketName := bucket.BucketName
+	var out CopyObjectResult
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return out, err
+	}
+
+	return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { // cross-bucket copy helper shared by CopyObjectTo/CopyObjectFrom
+	var out CopyObjectResult
+	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return out, err
+	}
+	params := map[string]interface{}{}
+	resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// AppendObject uploads data by appending to an object.
+//
+// The request must specify a position: the first append uses 0 and each later one
+// uses the object's current length (e.g. first append position 0, content-length
+// 65536 means the second append uses position 65536). After each successful append,
+// the response header x-oss-next-append-position reports the next position.
+//
+// objectKey       object to append to.
+// reader          io.Reader providing the content to append.
+// appendPosition  start position of the append.
+// options         properties for the new object on the first append, e.g.
+//                 CacheControl, ContentDisposition, ContentEncoding, Expires, ServerSideEncryption, ObjectACL.
+//
+// int64           next append position; valid when error is nil.
+// error           nil on success, otherwise error details.
+//
+func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
+	request := &AppendObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+		Position:  appendPosition,
+	}
+
+	result, err := bucket.DoAppendObject(request, options)
+	if err != nil {
+		return appendPosition, err
+	}
+
+	return result.NextPosition, err
+}
+
+//
+// DoAppendObject performs an append upload.
+//
+// request  append-upload request.
+// options  append-upload options.
+//
+// AppendObjectResult  append response; valid when error is nil.
+// error    nil on success, otherwise error details.
+//
+func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
+	params := map[string]interface{}{}
+	params["append"] = nil
+	params["position"] = strconv.FormatInt(request.Position, 10)
+	headers := make(map[string]string)
+
+	opts := addContentType(options, request.ObjectKey)
+	handleOptions(headers, opts)
+
+	var initCRC uint64
+	isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64)
+	if isCRCSet {
+		initCRC = initCRCOpt.(uint64)
+	}
+
+	listener := getProgressListener(options)
+
+	handleOptions(headers, opts) // NOTE(review): duplicate of the call above; redundant but harmless.
+	resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
+		request.Reader, initCRC, listener)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
+	result := &AppendObjectResult{
+		NextPosition: nextPosition,
+		CRC:          resp.ServerCRC,
+	}
+
+	if bucket.getConfig().IsEnableCRC && isCRCSet {
+		err = checkCRC(resp, "AppendObject")
+		if err != nil {
+			return result, err
+		}
+	}
+
+	return result, nil
+}
+
+//
+// DeleteObject deletes an object.
+//
+// objectKey  object to delete.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) DeleteObject(objectKey string) error {
+	params := map[string]interface{}{}
+	resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// DeleteObjects deletes multiple objects in one request.
+//
+// objectKeys  list of objects to delete.
+// options     delete options; DeleteObjectsQuiet selects quiet mode (off by default).
+//
+// DeleteObjectsResult  result in non-quiet mode; valid when error is nil.
+// error       nil on success, otherwise error details.
+//
+func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
+	out := DeleteObjectsResult{}
+	dxml := deleteXML{}
+	for _, key := range objectKeys {
+		dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
+	}
+	isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
+	dxml.Quiet = isQuiet.(bool)
+
+	bs, err := xml.Marshal(dxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+	sum := md5.Sum(bs)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	options = append(options, ContentMD5(b64))
+
+	params := map[string]interface{}{}
+	params["delete"] = nil
+	params["encoding-type"] = "url"
+
+	resp, err := bucket.do("POST", "", params, options, buffer, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	if !dxml.Quiet {
+		if err = xmlUnmarshal(resp.Body, &out); err == nil {
+			err = decodeDeleteObjectsResult(&out)
+		}
+	}
+	return out, err
+}
+
+//
+// IsObjectExist reports whether an object exists.
+//
+// bool   true if the object exists, false otherwise; valid when error is nil.
+//
+// error  nil on success, otherwise error details.
+//
+func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
+	_, err := bucket.GetObjectMeta(objectKey)
+	if err == nil {
+		return true, nil
+	}
+
+	switch err.(type) {
+	case ServiceError:
+		if err.(ServiceError).StatusCode == 404 && err.(ServiceError).Code == "NoSuchKey" {
+			return false, nil
+		}
+	}
+
+	return false, err
+}
+
+//
+// ListObjects lists the objects in the bucket that match the given filters.
+//
+// options  listing behavior: Prefix, MaxKeys (maximum count), Marker (start key),
+//          Delimiter (character used to group object names).
+//
+// For example, with objects my-object-1, my-object-11, my-object-2, my-object-21,
+// my-object-22, my-object-3, my-object-31, my-object-32: Prefix my-object-2 returns
+// my-object-2, my-object-21, my-object-22; Marker my-object-22 returns my-object-3,
+// my-object-31, my-object-32; MaxKeys caps each page. These parameters combine to
+// implement pagination. Setting prefix to a folder name lists everything under it
+// recursively; additionally setting delimiter to "/" lists only the folder's direct
+// files, with subfolder prefixes reported in CommonPrefixes — e.g. prefix "fun/"
+// plus delimiter "/" yields "fun/test.jpg" and the prefix "fun/movie/".
+//
+// For common usage see sample/list_object.go.
+//
+// ListObjectsResult  result whose Objects member lists the objects; valid when error is nil.
+//
+func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
+	var out ListObjectsResult
+
+	options = append(options, EncodingType("url"))
+	params, err := getRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResult(&out)
+	return out, err
+}
+
+//
+// SetObjectMeta sets the object's metadata.
+//
+// objectKey  object to update.
+// options    object properties to set: CacheControl, ContentDisposition,
+//            ContentEncoding, Expires, ServerSideEncryption, Meta.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
+	options = append(options, MetadataDirective(MetaReplace))
+	_, err := bucket.CopyObject(objectKey, objectKey, options...)
+	return err
+}
+
+//
+// GetObjectDetailedMeta queries an object's full header information.
+//
+// objectKey  object name.
+// options    preconditions that must hold (IfModifiedSince, IfUnmodifiedSince, IfMatch,
+//            IfNoneMatch); see https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+//
+// http.Header  the object's metadata; valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
+	params := map[string]interface{}{}
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+//
+// GetObjectMeta queries an object's basic header information.
+//
+// GetObjectMeta is lighter than GetObjectDetailedMeta: it returns only a few basic
+// meta entries — ETag, Size (via the Content-Length header) and LastModified.
+//
+// objectKey  object name.
+//
+// http.Header  the object's metadata; valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
+	params := map[string]interface{}{}
+	params["objectMeta"] = nil
+	//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
+	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+//
+// SetObjectACL sets an object's ACL.
+//
+// Only the bucket owner may call PutObjectACL. An object's ACL takes priority
+// over the bucket's ACL: e.g. with a private bucket and a public-read-write
+// object, the object ACL is checked first, so all users may access that object
+// even though the bucket is private. An object with no ACL follows the bucket ACL.
+//
+// Object reads cover GetObject, HeadObject, CopyObject and the source-object read
+// in UploadPartCopy. Object writes cover PutObject, PostObject, AppendObject,
+// DeleteObject, DeleteMultipleObjects, CompleteMultipartUpload and CopyObject's write.
+//
+// objectKey  object whose ACL is set.
+// objectACL  object permission: PrivateACL, PublicReadACL or PublicReadWriteACL.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
+	options := []Option{ObjectACL(objectACL)}
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetObjectACL gets an object's ACL.
+//
+// objectKey  object whose ACL is queried.
+//
+// GetObjectACLResult  result whose Acl field holds the object's permission; valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
+	var out GetObjectACLResult
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// PutSymlink creates a symbolic link.
+//
+// The link's target must not itself be a symlink. Creating a symlink checks
+// neither that the target exists, nor that its type is legal, nor that it is
+// accessible — those checks are deferred to APIs such as GetObject that access
+// the target. If the link name already exists and is accessible, the new link
+// overwrites it. Parameters prefixed with x-oss-meta- are treated as user meta.
+//
+// symObjectKey     symlink object to create.
+// targetObjectKey  target object.
+//
+// error            nil on success, otherwise error details.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+	params := map[string]interface{}{}
+	params["symlink"] = nil
+	resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetSymlink gets a symbolic link's target object.
+// Returns 404 if the symlink does not exist.
+//
+// objectKey  symlink object whose target is queried.
+//
+// error      nil on success, otherwise error details; the returned headers carry the target object when error is nil.
+//
+func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
+	params := map[string]interface{}{}
+	params["symlink"] = nil
+	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+	targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+	if err != nil {
+		return resp.Headers, err
+	}
+	resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+	return resp.Headers, err
+}
+
+//
+// RestoreObject restores a frozen Archive-class object so it becomes readable.
+//
+// An Archive object starts out frozen.
+//
+// Calling restore on a frozen object succeeds and starts server-side thawing;
+// repeat calls while thawing succeed without extending readability. A thawed object
+// is readable for one day; each restore adds a day, up to seven, then it refreezes.
+//
+// objectKey  object to restore.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) RestoreObject(objectKey string) error {
+	params := map[string]interface{}{}
+	params["restore"] = nil
+	resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+//
+// SignURL generates a presigned URL.
+//
+// objectKey     object the URL addresses.
+// method        HTTP method; expiredInSec is the validity period in seconds.
+//
+// string        the signed URL; valid when error is nil.
+// error         nil on success, otherwise error details.
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+	if expiredInSec < 0 { // NOTE(review): 0 passes this check although the message demands a value bigger than 0 — confirm intent.
+		return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec)
+	}
+	expiration := time.Now().Unix() + expiredInSec
+
+	params, err := getRawParams(options)
+	if err != nil {
+		return "", err
+	}
+
+	headers := make(map[string]string)
+	err = handleOptions(headers, options)
+	if err != nil {
+		return "", err
+	}
+
+	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
+
+//
+// PutObjectWithURL creates a new object via a presigned URL, overwriting any existing one.
+// PutObjectWithURL does not derive a mimetype from the key.
+//
+// signedURL  presigned URL.
+// reader     io.Reader supplying the object's data.
+// options    optional object properties: CacheControl, ContentDisposition,
+//            ContentEncoding, Expires, ServerSideEncryption, ObjectACL, Meta; see
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// PutObjectFromFileWithURL creates a new object via a presigned URL, reading its content from a local file.
+// PutObjectFromFileWithURL does not derive a mimetype from the key or filePath.
+//
+// signedURL  presigned URL.
+// filePath   local file, e.g. dir/file.txt, whose content becomes the object's value.
+// options    optional object properties; see PutObject's options.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+//
+// DoPutObjectWithURL uploads an object via a presigned URL.
+//
+// signedURL  presigned URL.
+// reader     io.Reader supplying the object's data.
+// options    upload options.
+//
+// Response   upload response; valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+	listener := getProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoPutObjectWithURL")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+//
+// GetObjectWithURL downloads an object via a presigned URL.
+//
+// signedURL  presigned URL.
+// options    constraints on the object: Range, IfModifiedSince, IfUnmodifiedSince,
+//            IfMatch, IfNoneMatch, AcceptEncoding; see
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser  reader for the data; close it after reading. Valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return nil, err
+	}
+	return result.Response.Body, nil
+}
+
+//
+// GetObjectToFileWithURL downloads an object via a presigned URL into a local file.
+//
+// signedURL  presigned URL.
+// filePath   local file that receives the object's content.
+// options    constraints on the object; see GetObject's options.
+//
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
+	tempFilePath := filePath + TempFileSuffix
+
+	// read the object content
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Body.Close()
+
+	// create the file if it does not exist, truncate it if it does
+	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+	if err != nil {
+		return err
+	}
+
+	// write the data to the file
+	_, err = io.Copy(fd, result.Response.Body)
+	fd.Close()
+	if err != nil {
+		return err
+	}
+
+	// verify the CRC checksum
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = checkCRC(result.Response, "GetObjectToFileWithURL")
+		if err != nil {
+			os.Remove(tempFilePath)
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+//
+// DoGetObjectWithURL downloads an object via a presigned URL.
+//
+// signedURL  presigned URL.
+// options    constraints on the object; see GetObject's options.
+//
+// GetObjectResult  download response; valid when error is nil.
+// error      nil on success, otherwise error details.
+//
+func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	// crc
+	var crcCalc hash.Hash64
+	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+	if bucket.getConfig().IsEnableCRC && !hasRange {
+		crcCalc = crc64.New(crcTable())
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	// progress
+	listener := getProgressListener(options)
+
+	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+	resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
+
+	return result, nil
+}
+
+// do issues a bucket-scoped request: applies options to headers and forwards to Conn.Do.
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+	return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+		params, headers, data, 0, listener)
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) { // presigned-URL variant of do; forwards to Conn.DoURL
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+	return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+}
+
+func (bucket Bucket) getConfig() *Config { // getConfig returns the owning client's configuration.
+	return bucket.Client.Config
+}
+
+func addContentType(options []Option, keys ...string) []Option { // prepend a Content-Type option derived from the first key with a known extension
+	typ := TypeByExtension("")
+	for _, key := range keys {
+		typ = TypeByExtension(key)
+		if typ != "" {
+			break
+		}
+	}
+
+	if typ == "" {
+		typ = "application/octet-stream" // fallback when no extension matches
+	}
+
+	opts := []Option{ContentType(typ)}
+	opts = append(opts, options...)
+
+	return opts
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
new file mode 100644
index 000000000000..22c62976763e
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -0,0 +1,802 @@
+// Package oss implements functions for access oss service.
+// It has two main struct Client and Bucket.
+package oss
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
//
// Client is the SDK entry point. Its methods implement bucket-level
// operations: create/delete bucket, set/get acl, lifecycle, referer,
// logging, website, and so on. Object upload/download is done through
// Bucket. Use oss.New to create a Client.
//
type (
	// Client is an OSS client holding its configuration and connection.
	Client struct {
		Config *Config // client configuration (endpoint, credentials, timeouts)
		Conn   *Conn   // connection used to send HTTP requests
	}

	// ClientOption is a functional option for New, e.g. UseCname, Timeout, SecurityToken.
	ClientOption func(*Client)
)
+
+//
+// New 生成一个新的Client。
+//
+// endpoint 用户Bucket所在数据中心的访问域名,如http://oss-cn-hangzhou.aliyuncs.com。
+// accessKeyId 用户标识。
+// accessKeySecret 用户密钥。
+//
+// Client 生成的新Client。error为nil时有效。
+// error 操作无错误时为nil,非nil时表示操作出错。
+//
+func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
+ // configuration
+ config := getDefaultOssConfig()
+ config.Endpoint = endpoint
+ config.AccessKeyID = accessKeyID
+ config.AccessKeySecret = accessKeySecret
+
+ // url parse
+ url := &urlMaker{}
+ url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
+
+ // http connect
+ conn := &Conn{config: config, url: url}
+
+ // oss client
+ client := &Client{
+ config,
+ conn,
+ }
+
+ // client options parse
+ for _, option := range options {
+ option(client)
+ }
+
+ // create http connect
+ err := conn.init(config, url)
+
+ return client, err
+}
+
+//
+// Bucket 取存储空间(Bucket)的对象实例。
+//
+// bucketName 存储空间名称。
+// Bucket 新的Bucket。error为nil时有效。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) Bucket(bucketName string) (*Bucket, error) {
+ return &Bucket{
+ client,
+ bucketName,
+ }, nil
+}
+
+//
+// CreateBucket 创建Bucket。
+//
+// bucketName bucket名称,在整个OSS中具有全局唯一性,且不能修改。bucket名称的只能包括小写字母,数字和短横线-,
+// 必须以小写字母或者数字开头,长度必须在3-255字节之间。
+// options 创建bucket的选项。您可以使用选项ACL,指定bucket的访问权限。Bucket有以下三种访问权限,私有读写(ACLPrivate)、
+// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite),默认访问权限是私有读写。可以使用StorageClass选项设置bucket的存储方式,目前支持:标准存储模式(StorageStandard)、 低频存储模式(StorageIA)、 归档存储模式(StorageArchive)。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) CreateBucket(bucketName string, options ...Option) error {
+ headers := make(map[string]string)
+ handleOptions(headers, options)
+
+ buffer := new(bytes.Buffer)
+
+ isOptSet, val, _ := isOptionSet(options, storageClass)
+ if isOptSet {
+ cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
+ bs, err := xml.Marshal(cbConfig)
+ if err != nil {
+ return err
+ }
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers[HTTPHeaderContentType] = contentType
+ }
+
+ params := map[string]interface{}{}
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// ListBuckets 获取当前用户下的bucket。
+//
+// options 指定ListBuckets的筛选行为,Prefix、Marker、MaxKeys三个选项。Prefix限定前缀。
+// Marker设定从Marker之后的第一个开始返回。MaxKeys限定此次返回的最大数目,默认为100。
+// 常用使用场景的实现,参数示例程序list_bucket.go。
+// ListBucketsResponse 操作成功后的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
+ var out ListBucketsResult
+
+ params, err := getRawParams(options)
+ if err != nil {
+ return out, err
+ }
+
+ resp, err := client.do("GET", "", params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// IsBucketExist Bucket是否存在。
+//
+// bucketName 存储空间名称。
+//
+// bool 存储空间是否存在。error为nil时有效。
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) IsBucketExist(bucketName string) (bool, error) {
+ listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
+ if err != nil {
+ return false, err
+ }
+
+ if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
+ return true, nil
+ }
+ return false, nil
+}
+
+//
+// DeleteBucket 删除空存储空间。非空时请先清理Object、Upload。
+//
+// bucketName 存储空间名称。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) DeleteBucket(bucketName string) error {
+ params := map[string]interface{}{}
+ resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLocation 查看Bucket所属数据中心位置的信息。
+//
+// 如果您想了解"访问域名和数据中心"详细信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+//
+// bucketName 存储空间名称。
+//
+// string Bucket所属的数据中心位置信息。
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) GetBucketLocation(bucketName string) (string, error) {
+ params := map[string]interface{}{}
+ params["location"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var LocationConstraint string
+ err = xmlUnmarshal(resp.Body, &LocationConstraint)
+ return LocationConstraint, err
+}
+
+//
+// SetBucketACL 修改Bucket的访问权限。
+//
+// bucketName 存储空间名称。
+// bucketAcl bucket的访问权限。Bucket有以下三种访问权限,Bucket有以下三种访问权限,私有读写(ACLPrivate)、
+// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite)。
+//
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
+ headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
+ params := map[string]interface{}{}
+ resp, err := client.do("PUT", bucketName, params, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketACL 获得Bucket的访问权限。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketAclResponse 操作成功后的返回值,error为nil时该返回值有效。
+// error 操作无错误时返回nil,非nil为错误信息。
+//
+func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
+ var out GetBucketACLResult
+ params := map[string]interface{}{}
+ params["acl"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// SetBucketLifecycle 修改Bucket的生命周期设置。
+//
+// OSS提供Object生命周期管理来为用户管理对象。用户可以为某个Bucket定义生命周期配置,来为该Bucket的Object定义各种规则。
+// Bucket的拥有者可以通过SetBucketLifecycle来设置Bucket的Lifecycle配置。Lifecycle开启后,OSS将按照配置,
+// 定期自动删除与Lifecycle规则相匹配的Object。如果您想了解更多的生命周期的信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+//
+// bucketName 存储空间名称。
+// rules 生命周期规则列表。生命周期规则有两种格式,指定绝对和相对过期时间,分布由days和year/month/day控制。
+// 具体用法请参考示例程序sample/bucket_lifecycle.go。
+//
+// error 操作无错误时返回error为nil,非nil为错误信息。
+//
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
+ lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
+ bs, err := xml.Marshal(lxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketLifecycle 删除Bucket的生命周期设置。
+//
+//
+// bucketName 存储空间名称。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketLifecycle(bucketName string) error {
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLifecycle 查看Bucket的生命周期设置。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketLifecycleResponse 操作成功的返回值,error为nil时该返回值有效。Rules为该bucket上的规则列表。
+// error 操作无错误时为nil,非nil为错误信息。
+//
+func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
+ var out GetBucketLifecycleResult
+ params := map[string]interface{}{}
+ params["lifecycle"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// SetBucketReferer 设置bucket的referer访问白名单和是否允许referer字段为空的请求访问。
+//
+// 防止用户在OSS上的数据被其他人盗用,OSS支持基于HTTP header中表头字段referer的防盗链方法。可以通过OSS控制台或者API的方式对
+// 一个bucket设置referer字段的白名单和是否允许referer字段为空的请求访问。例如,对于一个名为oss-example的bucket,
+// 设置其referer白名单为http://www.aliyun.com。则所有referer为http://www.aliyun.com的请求才能访问oss-example
+// 这个bucket中的object。如果您还需要了解更多信息,请参看
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+//
+// bucketName 存储空间名称。
+// referers 访问白名单列表。一个bucket可以支持多个referer参数。referer参数支持通配符"*"和"?"。
+// 用法请参看示例sample/bucket_referer.go
+// allowEmptyReferer 指定是否允许referer字段为空的请求访问。 默认为true。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
+ rxml := RefererXML{}
+ rxml.AllowEmptyReferer = allowEmptyReferer
+ if referers == nil {
+ rxml.RefererList = append(rxml.RefererList, "")
+ } else {
+ for _, referer := range referers {
+ rxml.RefererList = append(rxml.RefererList, referer)
+ }
+ }
+
+ bs, err := xml.Marshal(rxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["referer"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketReferer 获得Bucket的白名单地址。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketRefererResponse 操作成功的返回值,error为nil时该返回值有效。
+// error 操作无错误时为nil,非nil为错误信息。
+//
+func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
+ var out GetBucketRefererResult
+ params := map[string]interface{}{}
+ params["referer"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// SetBucketLogging 修改Bucket的日志设置。
+//
+// OSS为您提供自动保存访问日志记录功能。Bucket的拥有者可以开启访问日志记录功能。当一个bucket开启访问日志记录功能后,
+// OSS自动将访问这个bucket的请求日志,以小时为单位,按照固定的命名规则,生成一个Object写入用户指定的bucket中。
+// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+//
+// bucketName 需要记录访问日志的Bucket。
+// targetBucket 访问日志记录到的Bucket。
+// targetPrefix bucketName中需要存储访问日志记录的object前缀。为空记录所有object的访问日志。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
+ isEnable bool) error {
+ var err error
+ var bs []byte
+ if isEnable {
+ lxml := LoggingXML{}
+ lxml.LoggingEnabled.TargetBucket = targetBucket
+ lxml.LoggingEnabled.TargetPrefix = targetPrefix
+ bs, err = xml.Marshal(lxml)
+ } else {
+ lxml := loggingXMLEmpty{}
+ bs, err = xml.Marshal(lxml)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketLogging 删除Bucket的日志设置。
+//
+// bucketName 需要删除访问日志的Bucket。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketLogging(bucketName string) error {
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLogging 获得Bucket的日志设置。
+//
+// bucketName 需要删除访问日志的Bucket。
+// GetBucketLoggingResponse 操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
+ var out GetBucketLoggingResult
+ params := map[string]interface{}{}
+ params["logging"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// SetBucketWebsite 设置/修改Bucket的默认首页以及错误页。
+//
+// OSS支持静态网站托管,Website操作可以将一个bucket设置成静态网站托管模式 。您可以将自己的Bucket配置成静态网站托管模式。
+// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName 需要设置Website的Bucket。
+// indexDocument 索引文档。
+// errorDocument 错误文档。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
+ wxml := WebsiteXML{}
+ wxml.IndexDocument.Suffix = indexDocument
+ wxml.ErrorDocument.Key = errorDocument
+
+ bs, err := xml.Marshal(wxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := make(map[string]string)
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketWebsite 删除Bucket的Website设置。
+//
+// bucketName 需要删除website设置的Bucket。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketWebsite(bucketName string) error {
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketWebsite 获得Bucket的默认首页以及错误页。
+//
+// bucketName 存储空间名称。
+//
+// GetBucketWebsiteResponse 操作成功的返回值,error为nil时该返回值有效。
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
+ var out GetBucketWebsiteResult
+ params := map[string]interface{}{}
+ params["website"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// SetBucketCORS 设置Bucket的跨域访问(CORS)规则。
+//
+// 跨域访问的更多信息,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName 需要设置Website的Bucket。
+// corsRules 待设置的CORS规则。用法请参看示例代码sample/bucket_cors.go。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
+ corsxml := CORSXML{}
+ for _, v := range corsRules {
+ cr := CORSRule{}
+ cr.AllowedMethod = v.AllowedMethod
+ cr.AllowedOrigin = v.AllowedOrigin
+ cr.AllowedHeader = v.AllowedHeader
+ cr.ExposeHeader = v.ExposeHeader
+ cr.MaxAgeSeconds = v.MaxAgeSeconds
+ corsxml.CORSRules = append(corsxml.CORSRules, cr)
+ }
+
+ bs, err := xml.Marshal(corsxml)
+ if err != nil {
+ return err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ contentType := http.DetectContentType(buffer.Bytes())
+ headers := map[string]string{}
+ headers[HTTPHeaderContentType] = contentType
+
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("PUT", bucketName, params, headers, buffer)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketCORS 删除Bucket的Website设置。
+//
+// bucketName 需要删除cors设置的Bucket。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) DeleteBucketCORS(bucketName string) error {
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("DELETE", bucketName, params, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketCORS 获得Bucket的CORS设置。
+//
+//
+// bucketName 存储空间名称。
+// GetBucketCORSResult 操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
+ var out GetBucketCORSResult
+ params := map[string]interface{}{}
+ params["cors"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// GetBucketInfo 获得Bucket的信息。
+//
+// bucketName 存储空间名称。
+// GetBucketInfoResult 操作成功的返回值,error为nil时该返回值有效。
+//
+// error 操作无错误为nil,非nil为错误信息。
+//
+func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
+ var out GetBucketInfoResult
+ params := map[string]interface{}{}
+ params["bucketInfo"] = nil
+ resp, err := client.do("GET", bucketName, params, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// UseCname 设置是否使用CNAME,默认不使用。
+//
+// isUseCname true设置endpoint格式是cname格式,false为非cname格式,默认false
+//
+func UseCname(isUseCname bool) ClientOption {
+ return func(client *Client) {
+ client.Config.IsCname = isUseCname
+ client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+ }
+}
+
+//
+// Timeout 设置HTTP超时时间。
+//
+// connectTimeoutSec HTTP链接超时时间,单位是秒,默认10秒。0表示永不超时。
+// readWriteTimeout HTTP发送接受数据超时时间,单位是秒,默认20秒。0表示永不超时。
+//
+func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
+ return func(client *Client) {
+ client.Config.HTTPTimeout.ConnectTimeout =
+ time.Second * time.Duration(connectTimeoutSec)
+ client.Config.HTTPTimeout.ReadWriteTimeout =
+ time.Second * time.Duration(readWriteTimeout)
+ client.Config.HTTPTimeout.HeaderTimeout =
+ time.Second * time.Duration(readWriteTimeout)
+ client.Config.HTTPTimeout.IdleConnTimeout =
+ time.Second * time.Duration(readWriteTimeout)
+ client.Config.HTTPTimeout.LongTimeout =
+ time.Second * time.Duration(readWriteTimeout*10)
+ }
+}
+
+//
+// SecurityToken 临时用户设置SecurityToken。
+//
+// token STS token
+//
+func SecurityToken(token string) ClientOption {
+ return func(client *Client) {
+ client.Config.SecurityToken = strings.TrimSpace(token)
+ }
+}
+
+//
+// EnableMD5 是否启用MD5校验,默认启用。
+//
+// isEnableMD5 true启用MD5校验,false不启用MD5校验
+//
+func EnableMD5(isEnableMD5 bool) ClientOption {
+ return func(client *Client) {
+ client.Config.IsEnableMD5 = isEnableMD5
+ }
+}
+
+//
+// MD5ThresholdCalcInMemory 使用内存计算MD5值的上限,默认16MB。
+//
+// threshold 单位Byte。上传内容小于threshold在MD5在内存中计算,大于使用临时文件计算MD5
+//
+func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
+ return func(client *Client) {
+ client.Config.MD5Threshold = threshold
+ }
+}
+
+//
+// EnableCRC 上传是否启用CRC校验,默认启用。
+//
+// isEnableCRC true启用CRC校验,false不启用CRC校验
+//
+func EnableCRC(isEnableCRC bool) ClientOption {
+ return func(client *Client) {
+ client.Config.IsEnableCRC = isEnableCRC
+ }
+}
+
+//
+// UserAgent 指定UserAgent,默认如下aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2)。
+//
+// userAgent user agent字符串。
+//
+func UserAgent(userAgent string) ClientOption {
+ return func(client *Client) {
+ client.Config.UserAgent = userAgent
+ }
+}
+
+//
+// Proxy 设置代理服务器,默认不使用代理。
+//
+// proxyHost 代理服务器地址,格式是host或host:port
+//
+func Proxy(proxyHost string) ClientOption {
+ return func(client *Client) {
+ client.Config.IsUseProxy = true
+ client.Config.ProxyHost = proxyHost
+ client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+ }
+}
+
+//
+// AuthProxy 设置需要认证的代理服务器,默认不使用代理。
+//
+// proxyHost 代理服务器地址,格式是host或host:port
+// proxyUser 代理服务器认证的用户名
+// proxyPassword 代理服务器认证的用户密码
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+ return func(client *Client) {
+ client.Config.IsUseProxy = true
+ client.Config.ProxyHost = proxyHost
+ client.Config.IsAuthProxy = true
+ client.Config.ProxyUser = proxyUser
+ client.Config.ProxyPassword = proxyPassword
+ client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+ }
+}
+
// Private
// do issues a bucket-level request (empty object name) through the shared
// connection, with no CRC seed and no progress listener.
func (client Client) do(method, bucketName string, params map[string]interface{},
	headers map[string]string, data io.Reader) (*Response, error) {
	return client.Conn.Do(method, bucketName, "", params,
		headers, data, 0, nil)
}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
new file mode 100644
index 000000000000..f2dc34e1834c
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -0,0 +1,69 @@
+package oss
+
+import (
+ "time"
+)
+
// HTTPTimeout groups the timeout settings applied to the HTTP transport.
type HTTPTimeout struct {
	ConnectTimeout   time.Duration // timeout for establishing the connection
	ReadWriteTimeout time.Duration // timeout for reading/writing request data
	HeaderTimeout    time.Duration // timeout waiting for response headers
	LongTimeout      time.Duration // extended timeout — presumably for large transfers; confirm at use sites
	IdleConnTimeout  time.Duration // how long idle connections are kept alive
}
+
// Config holds the client-side configuration for the OSS SDK.
type Config struct {
	Endpoint        string      // OSS endpoint address
	AccessKeyID     string      // access key ID
	AccessKeySecret string      // access key secret
	RetryTimes      uint        // retry count on failure, default 5
	UserAgent       string      // SDK name/version/system information
	IsDebug         bool        // whether debug mode is on, default false
	Timeout         uint        // timeout in seconds, default 60
	SecurityToken   string      // STS token
	IsCname         bool        // whether the endpoint is a CNAME
	HTTPTimeout     HTTPTimeout // HTTP timeout settings
	IsUseProxy      bool        // whether to use a proxy
	ProxyHost       string      // proxy server address
	IsAuthProxy     bool        // whether the proxy requires authentication
	ProxyUser       string      // proxy auth user name
	ProxyPassword   string      // proxy auth password
	IsEnableMD5     bool        // whether MD5 verification is enabled for uploads
	MD5Threshold    int64       // max bytes for in-memory MD5; above this a temp file is used
	IsEnableCRC     bool        // whether CRC64 verification is enabled for uploads
}
+
+// 获取默认配置
+func getDefaultOssConfig() *Config {
+ config := Config{}
+
+ config.Endpoint = ""
+ config.AccessKeyID = ""
+ config.AccessKeySecret = ""
+ config.RetryTimes = 5
+ config.IsDebug = false
+ config.UserAgent = userAgent
+ config.Timeout = 60 // seconds
+ config.SecurityToken = ""
+ config.IsCname = false
+
+ config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s
+ config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
+ config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s
+ config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s
+ config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s
+
+ config.IsUseProxy = false
+ config.ProxyHost = ""
+ config.IsAuthProxy = false
+ config.ProxyUser = ""
+ config.ProxyPassword = ""
+
+ config.MD5Threshold = 16 * 1024 * 1024 // 16MB
+ config.IsEnableMD5 = false
+ config.IsEnableCRC = true
+
+ return &config
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
new file mode 100644
index 000000000000..13227708f3f3
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
@@ -0,0 +1,599 @@
+package oss
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
// Conn holds the per-client connection state: the configuration, the URL
// builder, and the underlying *http.Client (created in init).
type Conn struct {
	config *Config
	url    *urlMaker
	client *http.Client
}
+
// signKeyList enumerates the sub-resource and response-override query
// parameters that participate in request signing (see isParamSign /
// getSubResource). Fix: the original list contained "comp" twice; the
// duplicate is removed (membership semantics are unchanged).
var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "udfApplicationLog", "restore"}
+
+// init 初始化Conn
+func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
+ // new Transport
+ transport := newTransport(conn, config)
+
+ // Proxy
+ if conn.config.IsUseProxy {
+ proxyURL, err := url.Parse(config.ProxyHost)
+ if err != nil {
+ return err
+ }
+ transport.Proxy = http.ProxyURL(proxyURL)
+ }
+
+ conn.config = config
+ conn.url = urlMaker
+ conn.client = &http.Client{Transport: transport}
+
+ return nil
+}
+
+// Do 处理请求,返回响应结果。
+func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
+ data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+ urlParams := conn.getURLParams(params)
+ subResource := conn.getSubResource(params)
+ uri := conn.url.getURL(bucketName, objectName, urlParams)
+ resource := conn.url.getResource(bucketName, objectName, subResource)
+ return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
+}
+
// DoURL issues a request against a pre-signed URL and returns the response.
// Unlike doRequest, no signature is computed here — the signedURL already
// carries it — and no Date header is set.
//
// method    HTTP method (should match the one the URL was signed for).
// signedURL URL produced by signURL.
// headers   extra request headers; set after the defaults, so they win.
// data      optional request body.
// initCRC   initial CRC seed for body checksumming.
// listener  optional progress listener notified of transfer events.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	// get uri from signedURL
	uri, err := url.ParseRequestURI(signedURL)
	if err != nil {
		return nil, err
	}

	m := strings.ToUpper(string(method))
	req := &http.Request{
		Method:     m,
		URL:        uri,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       uri.Host,
	}

	// Wrap the body: sets ContentLength and optionally MD5/CRC and a
	// progress tee. A temp file may be returned for large-body MD5; it is
	// cleaned up once the request completes.
	tracker := &readerTracker{completedBytes: 0}
	fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
	if fd != nil {
		defer func() {
			fd.Close()
			os.Remove(fd.Name())
		}()
	}

	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}

	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

	// Caller-supplied headers override the defaults above.
	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}

	// transfer started
	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
	publishProgress(listener, event)

	resp, err := conn.client.Do(req)
	if err != nil {
		// transfer failed
		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
		publishProgress(listener, event)
		return nil, err
	}

	// transfer completed
	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
	publishProgress(listener, event)

	return conn.handleResponse(resp, crc)
}
+
+func (conn Conn) getURLParams(params map[string]interface{}) string {
+ // sort
+ keys := make([]string, 0, len(params))
+ for k := range params {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ // serialize
+ var buf bytes.Buffer
+ for _, k := range keys {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(url.QueryEscape(k))
+ if params[k] != nil {
+ buf.WriteString("=" + url.QueryEscape(params[k].(string)))
+ }
+ }
+
+ return buf.String()
+}
+
+func (conn Conn) getSubResource(params map[string]interface{}) string {
+ // sort
+ keys := make([]string, 0, len(params))
+ for k := range params {
+ if conn.isParamSign(k) {
+ keys = append(keys, k)
+ }
+ }
+ sort.Strings(keys)
+
+ // serialize
+ var buf bytes.Buffer
+ for _, k := range keys {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(k)
+ if params[k] != nil {
+ buf.WriteString("=" + params[k].(string))
+ }
+ }
+
+ return buf.String()
+}
+
+func (conn Conn) isParamSign(paramKey string) bool {
+ for _, k := range signKeyList {
+ if paramKey == k {
+ return true
+ }
+ }
+ return false
+}
+
// doRequest builds, signs and sends an HTTP request, returning the wrapped
// response. Ordering matters: every header that participates in signing
// (Date, security token, caller headers) must be set before signHeader is
// called.
func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
	method = strings.ToUpper(method)
	req := &http.Request{
		Method:     method,
		URL:        uri,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       uri.Host,
	}

	// Wrap the body: sets ContentLength and optionally MD5/CRC and a
	// progress tee. A temp file may be returned for large-body MD5; it is
	// cleaned up once the request completes.
	tracker := &readerTracker{completedBytes: 0}
	fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
	if fd != nil {
		defer func() {
			fd.Close()
			os.Remove(fd.Name())
		}()
	}

	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}

	date := time.Now().UTC().Format(http.TimeFormat)
	req.Header.Set(HTTPHeaderDate, date)
	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
	if conn.config.SecurityToken != "" {
		req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
	}

	// Caller-supplied headers override the defaults above.
	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}

	// Compute the Authorization header; must happen after all headers above.
	conn.signHeader(req, canonicalizedResource)

	// transfer started
	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
	publishProgress(listener, event)

	resp, err := conn.client.Do(req)
	if err != nil {
		// transfer failed
		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
		publishProgress(listener, event)
		return nil, err
	}

	// transfer completed
	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
	publishProgress(listener, event)

	return conn.handleResponse(resp, crc)
}
+
// signURL produces a pre-signed URL for the given method/bucket/object,
// valid until expiration (the value is placed in both the Date header used
// for signing and the Expires query parameter).
//
// NOTE: params is mutated — the security token (when configured), Expires,
// the access key ID and the signature are all added to it before
// serialization into the final URL.
func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
	if conn.config.SecurityToken != "" {
		params[HTTPParamSecurityToken] = conn.config.SecurityToken
	}
	subResource := conn.getSubResource(params)
	canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)

	// Build a throwaway request whose headers feed the signature string.
	m := strings.ToUpper(string(method))
	req := &http.Request{
		Method: m,
		Header: make(http.Header),
	}

	if conn.config.IsAuthProxy {
		auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
		basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
		req.Header.Set("Proxy-Authorization", basic)
	}

	// For URL signing, the expiration timestamp takes the place of the date.
	req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

	if headers != nil {
		for k, v := range headers {
			req.Header.Set(k, v)
		}
	}

	signedStr := conn.getSignedStr(req, canonicalizedResource)

	// The signature travels in the query string rather than a header.
	params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
	params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
	params[HTTPParamSignature] = signedStr

	urlParams := conn.getURLParams(params)
	return conn.url.getSignURL(bucketName, objectName, urlParams)
}
+
// handleBody prepares req.Body from the given reader. It returns:
//   - the temporary file created when MD5 is computed over a large body
//     (nil otherwise); the CALLER must close and remove it after the
//     request completes, and
//   - the CRC hash teed into the body (nil when CRC is disabled).
//
// It sets req.ContentLength (and the Content-Length header) for reader
// types whose length is statically known, computes Content-MD5 when
// enabled and not already set, and wraps the body with the CRC/progress
// tee reader.
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
	listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
	var file *os.File
	var crc hash.Hash64
	reader := body

	// length: only these concrete types expose their size up front
	switch v := body.(type) {
	case *bytes.Buffer:
		req.ContentLength = int64(v.Len())
	case *bytes.Reader:
		req.ContentLength = int64(v.Len())
	case *strings.Reader:
		req.ContentLength = int64(v.Len())
	case *os.File:
		req.ContentLength = tryGetFileSize(v)
	case *io.LimitedReader:
		req.ContentLength = int64(v.N)
	}
	req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))

	// md5: computed only when enabled and the caller didn't supply one.
	// calcMD5 may spill the body to a temp file (returned to the caller)
	// when it exceeds the in-memory threshold. Its error is discarded —
	// NOTE(review): a failed MD5 calculation proceeds with an empty header.
	if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
		md5 := ""
		reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
		req.Header.Set(HTTPHeaderContentMD5, md5)
	}

	// crc: tee the (possibly MD5-wrapped) reader through the CRC hasher
	// and the progress tracker
	if reader != nil && conn.config.IsEnableCRC {
		crc = NewCRC(crcTable(), initCRC)
		reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
	}

	// http body: ensure req.Body is a ReadCloser
	rc, ok := reader.(io.ReadCloser)
	if !ok && reader != nil {
		rc = ioutil.NopCloser(reader)
	}
	req.Body = rc

	return file, crc
}
+
+func tryGetFileSize(f *os.File) int64 {
+ fInfo, _ := f.Stat()
+ return fInfo.Size()
+}
+
+// handle response
+func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
+ var cliCRC uint64
+ var srvCRC uint64
+
+ statusCode := resp.StatusCode
+ if statusCode >= 400 && statusCode <= 505 {
+ // 4xx and 5xx indicate that the operation has error occurred
+ var respBody []byte
+ respBody, err := readResponseBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(respBody) == 0 {
+ // no error in response body
+ err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
+ } else {
+ // response contains storage service error object, unmarshal
+ srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+ resp.Header.Get(HTTPHeaderOssRequestID))
+ if err != nil { // error unmarshaling the error response
+ err = errIn
+ }
+ err = srvErr
+ }
+
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+ }, err
+ } else if statusCode >= 300 && statusCode <= 307 {
+ // oss use 3xx, but response has no body
+ err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: resp.Body,
+ }, err
+ }
+
+ if conn.config.IsEnableCRC && crc != nil {
+ cliCRC = crc.Sum64()
+ }
+ srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+
+ // 2xx, successful
+ return &Response{
+ StatusCode: resp.StatusCode,
+ Headers: resp.Header,
+ Body: resp.Body,
+ ClientCRC: cliCRC,
+ ServerCRC: srvCRC,
+ }, nil
+}
+
+func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
+ if contentLen == 0 || contentLen > md5Threshold {
+ // huge body, use temporary file
+ tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
+ if tempFile != nil {
+ io.Copy(tempFile, body)
+ tempFile.Seek(0, os.SEEK_SET)
+ md5 := md5.New()
+ io.Copy(md5, tempFile)
+ sum := md5.Sum(nil)
+ b64 = base64.StdEncoding.EncodeToString(sum[:])
+ tempFile.Seek(0, os.SEEK_SET)
+ reader = tempFile
+ }
+ } else {
+ // small body, use memory
+ buf, _ := ioutil.ReadAll(body)
+ sum := md5.Sum(buf)
+ b64 = base64.StdEncoding.EncodeToString(sum[:])
+ reader = bytes.NewReader(buf)
+ }
+ return
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+ defer resp.Body.Close()
+ out, err := ioutil.ReadAll(resp.Body)
+ if err == io.EOF {
+ err = nil
+ }
+ return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
+ var storageErr ServiceError
+ if err := xml.Unmarshal(body, &storageErr); err != nil {
+ return storageErr, err
+ }
+ storageErr.StatusCode = statusCode
+ storageErr.RequestID = requestID
+ storageErr.RawMessage = string(body)
+ return storageErr, nil
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+ data, err := ioutil.ReadAll(body)
+ if err != nil {
+ return err
+ }
+ return xml.Unmarshal(data, v)
+}
+
// timeoutConn wraps a net.Conn and automatically pushes deadlines forward
// around every I/O call: a short per-operation timeout while a Read/Write is
// in flight, then a long idle timeout once it completes.
type timeoutConn struct {
	conn        net.Conn
	timeout     time.Duration
	longTimeout time.Duration
}

// newTimeoutConn wraps conn, arming it with the long idle read deadline.
// NOTE(review): the SetReadDeadline error is ignored here — best effort.
func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
	conn.SetReadDeadline(time.Now().Add(longTimeout))
	return &timeoutConn{
		conn:        conn,
		timeout:     timeout,
		longTimeout: longTimeout,
	}
}

// Read performs one read under the short timeout, then re-arms the long idle
// read deadline.
func (c *timeoutConn) Read(b []byte) (n int, err error) {
	c.SetReadDeadline(time.Now().Add(c.timeout))
	n, err = c.conn.Read(b)
	c.SetReadDeadline(time.Now().Add(c.longTimeout))
	return n, err
}

// Write performs one write under the short timeout. Afterwards it re-arms the
// long READ deadline — presumably so the response to what was just written
// can be awaited without hitting the shorter write deadline; TODO confirm
// this read/write asymmetry is intentional.
func (c *timeoutConn) Write(b []byte) (n int, err error) {
	c.SetWriteDeadline(time.Now().Add(c.timeout))
	n, err = c.conn.Write(b)
	c.SetReadDeadline(time.Now().Add(c.longTimeout))
	return n, err
}

// Close closes the underlying connection.
func (c *timeoutConn) Close() error {
	return c.conn.Close()
}

// LocalAddr returns the local network address.
func (c *timeoutConn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *timeoutConn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

// SetDeadline sets both read and write deadlines on the underlying conn.
func (c *timeoutConn) SetDeadline(t time.Time) error {
	return c.conn.SetDeadline(t)
}

// SetReadDeadline sets the read deadline on the underlying conn.
func (c *timeoutConn) SetReadDeadline(t time.Time) error {
	return c.conn.SetReadDeadline(t)
}

// SetWriteDeadline sets the write deadline on the underlying conn.
func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
	return c.conn.SetWriteDeadline(t)
}
+
// urlMaker builds request URLs and canonicalized resource strings for the
// configured endpoint. The endpoint address styles it distinguishes:
const (
	urlTypeCname  = 1 // custom CNAME domain: the bucket is implied by the host
	urlTypeIP     = 2 // raw IP endpoint: the bucket appears in the URL path
	urlTypeAliyun = 3 // standard endpoint: the bucket is a subdomain prefix
)

// urlMaker holds the parsed endpoint used to assemble request URLs.
type urlMaker struct {
	Scheme  string // http or https
	NetLoc  string // host or ip (optionally with port)
	Type    int    // one of urlTypeCname / urlTypeIP / urlTypeAliyun
	IsProxy bool   // whether requests go through a proxy
}
+
+// Parse endpoint
+func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
+ if strings.HasPrefix(endpoint, "http://") {
+ um.Scheme = "http"
+ um.NetLoc = endpoint[len("http://"):]
+ } else if strings.HasPrefix(endpoint, "https://") {
+ um.Scheme = "https"
+ um.NetLoc = endpoint[len("https://"):]
+ } else {
+ um.Scheme = "http"
+ um.NetLoc = endpoint
+ }
+
+ host, _, err := net.SplitHostPort(um.NetLoc)
+ if err != nil {
+ host = um.NetLoc
+ }
+ ip := net.ParseIP(host)
+ if ip != nil {
+ um.Type = urlTypeIP
+ } else if isCname {
+ um.Type = urlTypeCname
+ } else {
+ um.Type = urlTypeAliyun
+ }
+ um.IsProxy = isProxy
+}
+
+// Build URL
+func (um urlMaker) getURL(bucket, object, params string) *url.URL {
+ host, path := um.buildURL(bucket, object)
+ addr := ""
+ if params == "" {
+ addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
+ } else {
+ addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+ }
+ uri, _ := url.ParseRequestURI(addr)
+ return uri
+}
+
+// Build Sign URL
+func (um urlMaker) getSignURL(bucket, object, params string) string {
+ host, path := um.buildURL(bucket, object)
+ return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+}
+
+// Build URL
+func (um urlMaker) buildURL(bucket, object string) (string, string) {
+ var host = ""
+ var path = ""
+
+ object = url.QueryEscape(object)
+ object = strings.Replace(object, "+", "%20", -1)
+
+ if um.Type == urlTypeCname {
+ host = um.NetLoc
+ path = "/" + object
+ } else if um.Type == urlTypeIP {
+ if bucket == "" {
+ host = um.NetLoc
+ path = "/"
+ } else {
+ host = um.NetLoc
+ path = fmt.Sprintf("/%s/%s", bucket, object)
+ }
+ } else {
+ if bucket == "" {
+ host = um.NetLoc
+ path = "/"
+ } else {
+ host = bucket + "." + um.NetLoc
+ path = "/" + object
+ }
+ }
+
+ return host, path
+}
+
+// Canonicalized Resource
+func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
+ if subResource != "" {
+ subResource = "?" + subResource
+ }
+ if bucketName == "" {
+ return fmt.Sprintf("/%s%s", bucketName, subResource)
+ }
+ return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
new file mode 100644
index 000000000000..84816d2cda68
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
@@ -0,0 +1,132 @@
+package oss
+
+import "os"
+
// ACLType is the access control level of a bucket or object.
type ACLType string

const (
	// ACLPrivate grants private read/write access.
	ACLPrivate ACLType = "private"

	// ACLPublicRead grants public read and private write access.
	ACLPublicRead ACLType = "public-read"

	// ACLPublicReadWrite grants public read/write access.
	ACLPublicReadWrite ACLType = "public-read-write"

	// ACLDefault means the object inherits the bucket's ACL. It applies to
	// objects only; buckets have no such level.
	ACLDefault ACLType = "default"
)

// MetadataDirectiveType controls whether an object COPY reuses the source
// object's metadata or replaces it.
type MetadataDirectiveType string

const (
	// MetaCopy makes the destination object use the source object's metadata.
	MetaCopy MetadataDirectiveType = "COPY"

	// MetaReplace makes the destination object use the metadata supplied with
	// the copy request.
	MetaReplace MetadataDirectiveType = "REPLACE"
)

// StorageClassType is the storage class of a bucket.
type StorageClassType string

const (
	// StorageStandard is the standard storage class.
	StorageStandard StorageClassType = "Standard"

	// StorageIA is the infrequent-access storage class.
	StorageIA StorageClassType = "IA"

	// StorageArchive is the archive storage class.
	StorageArchive StorageClassType = "Archive"
)

// HTTPMethod is an HTTP request method.
type HTTPMethod string

const (
	// HTTPGet is the HTTP GET method.
	HTTPGet HTTPMethod = "GET"

	// HTTPPut is the HTTP PUT method.
	HTTPPut HTTPMethod = "PUT"

	// HTTPHead is the HTTP HEAD method.
	HTTPHead HTTPMethod = "HEAD"

	// HTTPPost is the HTTP POST method.
	HTTPPost HTTPMethod = "POST"

	// HTTPDelete is the HTTP DELETE method.
	HTTPDelete HTTPMethod = "DELETE"
)
+
// HTTP header names used in OSS requests and responses.
const (
	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
	HTTPHeaderAuthorization             = "Authorization"
	HTTPHeaderCacheControl              = "Cache-Control"
	HTTPHeaderContentDisposition        = "Content-Disposition"
	HTTPHeaderContentEncoding           = "Content-Encoding"
	HTTPHeaderContentLength             = "Content-Length"
	HTTPHeaderContentMD5                = "Content-MD5"
	HTTPHeaderContentType               = "Content-Type"
	HTTPHeaderContentLanguage           = "Content-Language"
	HTTPHeaderDate                      = "Date"
	HTTPHeaderEtag                      = "ETag"
	HTTPHeaderExpires                   = "Expires"
	HTTPHeaderHost                      = "Host"
	HTTPHeaderLastModified              = "Last-Modified"
	HTTPHeaderRange                     = "Range"
	HTTPHeaderLocation                  = "Location"
	HTTPHeaderOrigin                    = "Origin"
	HTTPHeaderServer                    = "Server"
	HTTPHeaderUserAgent                 = "User-Agent"
	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
	HTTPHeaderIfMatch                   = "If-Match"
	HTTPHeaderIfNoneMatch               = "If-None-Match"

	HTTPHeaderOssACL                         = "X-Oss-Acl"
	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
)
+
// Query-string parameter names used when signing URLs.
const (
	HTTPParamExpires       = "Expires"
	HTTPParamAccessKeyID   = "OSSAccessKeyId"
	HTTPParamSignature     = "Signature"
	HTTPParamSecurityToken = "security-token"
)

// Miscellaneous limits and defaults.
const (
	MaxPartSize = 5 * 1024 * 1024 * 1024 // maximum multipart part size: 5GB
	MinPartSize = 100 * 1024             // minimum multipart part size: 100KB

	FilePermMode = os.FileMode(0664) // default permission for newly created files

	TempFilePrefix = "oss-go-temp-" // prefix for temporary files
	TempFileSuffix = ".temp"        // suffix for temporary files

	CheckpointFileSuffix = ".cp" // suffix for checkpoint files

	Version = "1.8.0" // Go SDK version
)
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
new file mode 100644
index 000000000000..fb9eb12452a0
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
@@ -0,0 +1,123 @@
+package oss
+
+import (
+ "hash"
+ "hash/crc64"
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ crc uint64
+ tab *crc64.Table
+}
+
+// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
+// using the polynomial represented by the Table.
+func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
+
+// Size returns the number of bytes Sum will return.
+func (d *digest) Size() int { return crc64.Size }
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (d *digest) BlockSize() int { return 1 }
+
+// Reset resets the Hash to its initial state.
+func (d *digest) Reset() { d.crc = 0 }
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (d *digest) Write(p []byte) (n int, err error) {
+ d.crc = crc64.Update(d.crc, d.tab, p)
+ return len(p), nil
+}
+
+// Sum64 returns crc64 value.
+func (d *digest) Sum64() uint64 { return d.crc }
+
+// Sum returns hash value.
+func (d *digest) Sum(in []byte) []byte {
+ s := d.Sum64()
+ return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// gf2Dim dimension of GF(2) vectors (length of CRC)
+const gf2Dim int = 64
+
+func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
+ var sum uint64
+ for i := 0; vec != 0; i++ {
+ if vec&1 != 0 {
+ sum ^= mat[i]
+ }
+
+ vec >>= 1
+ }
+ return sum
+}
+
+func gf2MatrixSquare(square []uint64, mat []uint64) {
+ for n := 0; n < gf2Dim; n++ {
+ square[n] = gf2MatrixTimes(mat, mat[n])
+ }
+}
+
+// CRC64Combine combine crc64
+func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
+ var even [gf2Dim]uint64 // even-power-of-two zeros operator
+ var odd [gf2Dim]uint64 // odd-power-of-two zeros operator
+
+ // Degenerate case
+ if len2 == 0 {
+ return crc1
+ }
+
+ // Put operator for one zero bit in odd
+ odd[0] = crc64.ECMA // CRC64 polynomial
+ var row uint64 = 1
+ for n := 1; n < gf2Dim; n++ {
+ odd[n] = row
+ row <<= 1
+ }
+
+ // Put operator for two zero bits in even
+ gf2MatrixSquare(even[:], odd[:])
+
+ // Put operator for four zero bits in odd
+ gf2MatrixSquare(odd[:], even[:])
+
+ // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
+ for {
+ // Apply zeros operator for this bit of len2
+ gf2MatrixSquare(even[:], odd[:])
+
+ if len2&1 != 0 {
+ crc1 = gf2MatrixTimes(even[:], crc1)
+ }
+
+ len2 >>= 1
+
+ // If no more bits set, then done
+ if len2 == 0 {
+ break
+ }
+
+ // Another iteration of the loop with odd and even swapped
+ gf2MatrixSquare(odd[:], even[:])
+ if len2&1 != 0 {
+ crc1 = gf2MatrixTimes(odd[:], crc1)
+ }
+ len2 >>= 1
+
+ // If no more bits set, then done
+ if len2 == 0 {
+ break
+ }
+ }
+
+ // Return combined crc
+ crc1 ^= crc2
+ return crc1
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
new file mode 100644
index 000000000000..648a5a59e0af
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go
@@ -0,0 +1,551 @@
+package oss
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "hash"
+ "hash/crc64"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+)
+
//
// DownloadFile downloads an object to a local file using concurrent ranged
// requests, optionally resuming from a checkpoint file.
//
// objectKey   the object key to download.
// filePath    the local file the object is written to.
// partSize    the size in bytes of each downloaded part, e.g. 100*1024 for
//             100KB parts.
// options     constraints on the object; see GetObject.
//
// error       nil on success; otherwise the failure.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 {
		return errors.New("oss: part size smaller than 1")
	}

	// Checkpoint (resumable download) configuration from the options.
	cpConf, err := getCpConfig(options, filePath)
	if err != nil {
		return err
	}

	// Optional byte range restricting the download.
	uRange, err := getRangeConfig(options)
	if err != nil {
		return err
	}

	routines := getRoutines(options)

	if cpConf.IsEnable {
		return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines, uRange)
	}

	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
+
+// 获取下载范围
+func getRangeConfig(options []Option) (*unpackedRange, error) {
+ rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
+ if err != nil || rangeOpt == nil {
+ return nil, err
+ }
+ return parseRange(rangeOpt.(string))
+}
+
// ----- concurrent download without checkpoint -----

// downloadWorkerArg bundles the parameters shared by all download workers.
type downloadWorkerArg struct {
	bucket    *Bucket
	key       string
	filePath  string
	options   []Option
	hook      downloadPartHook
	enableCRC bool
}

// downloadPartHook is a test seam invoked before each part is downloaded.
type downloadPartHook func(part downloadPart) error

var downloadPartHooker downloadPartHook = defaultDownloadPartHook

// defaultDownloadPartHook does nothing.
func defaultDownloadPartHook(part downloadPart) error {
	return nil
}

// defaultDownloadProgressListener overrides any ProgressListener passed in
// the GetObject options, so the per-part requests do not publish their own
// progress events (overall progress is reported by the download loop).
type defaultDownloadProgressListener struct {
}

// ProgressChanged silently ignores the event.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
+
+// 工作协程
+func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
+ for part := range jobs {
+ if err := arg.hook(part); err != nil {
+ failed <- err
+ break
+ }
+
+ // resolve options
+ r := Range(part.Start, part.End)
+ p := Progress(&defaultDownloadProgressListener{})
+ opts := make([]Option, len(arg.options)+2)
+ // append orderly, can not be reversed!
+ opts = append(opts, arg.options...)
+ opts = append(opts, r, p)
+
+ rd, err := arg.bucket.GetObject(arg.key, opts...)
+ if err != nil {
+ failed <- err
+ break
+ }
+ defer rd.Close()
+
+ var crcCalc hash.Hash64
+ if arg.enableCRC {
+ crcCalc = crc64.New(crcTable())
+ contentLen := part.End - part.Start + 1
+ rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
+ }
+ defer rd.Close()
+
+ select {
+ case <-die:
+ return
+ default:
+ }
+
+ fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
+ if err != nil {
+ failed <- err
+ break
+ }
+
+ _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
+ if err != nil {
+ fd.Close()
+ failed <- err
+ break
+ }
+
+ _, err = io.Copy(fd, rd)
+ if err != nil {
+ fd.Close()
+ failed <- err
+ break
+ }
+
+ if arg.enableCRC {
+ part.CRC64 = crcCalc.Sum64()
+ }
+
+ fd.Close()
+ results <- part
+ }
+}
+
// downloadScheduler feeds every part to the jobs channel and then closes it,
// signalling the workers that no more work is coming.
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
	for i := range parts {
		jobs <- parts[i]
	}
	close(jobs)
}

// downloadPart describes one ranged slice of the object to download.
type downloadPart struct {
	Index  int    // part number, starting at 0
	Start  int64  // inclusive start offset of the range
	End    int64  // inclusive end offset of the range
	Offset int64  // start offset of the whole download within the file
	CRC64  uint64 // checksum of the downloaded part
}
+
+// 文件分片
+func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) {
+ meta, err := bucket.GetObjectDetailedMeta(objectKey)
+ if err != nil {
+ return nil, false, 0, err
+ }
+
+ parts := []downloadPart{}
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return nil, false, 0, err
+ }
+
+ enableCRC := false
+ crcVal := (uint64)(0)
+ if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+ if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+ enableCRC = true
+ crcVal, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+ }
+ }
+
+ part := downloadPart{}
+ i := 0
+ start, end := adjustRange(uRange, objectSize)
+ for offset := start; offset < end; offset += partSize {
+ part.Index = i
+ part.Start = offset
+ part.End = GetPartEnd(offset, end, partSize)
+ part.Offset = start
+ part.CRC64 = 0
+ parts = append(parts, part)
+ i++
+ }
+ return parts, enableCRC, crcVal, nil
+}
+
+// 文件大小
+func getObjectBytes(parts []downloadPart) int64 {
+ var ob int64
+ for _, part := range parts {
+ ob += (part.End - part.Start + 1)
+ }
+ return ob
+}
+
+// 计算连续分片总的CRC
+func combineCRCInParts(dps []downloadPart) uint64 {
+ if dps == nil || len(dps) == 0 {
+ return 0
+ }
+
+ crc := dps[0].CRC64
+ for i := 1; i < len(dps); i++ {
+ crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
+ }
+
+ return crc
+}
+
// downloadFile performs a concurrent download without checkpoint/resume
// support: it splits the object into parts, downloads them with a pool of
// worker goroutines into a temporary file, optionally verifies the combined
// CRC-64, and finally renames the temporary file into place.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := getProgressListener(options)

	// Create the file if absent; do not truncate an existing one — the part
	// writers overwrite their own byte ranges.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Split the object into ranged parts.
	parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange)
	if err != nil {
		return err
	}

	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	// Start the worker goroutines.
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Feed the parts to the workers concurrently.
	go downloadScheduler(jobs, parts)

	// Wait until every part has finished; the first failure closes die to
	// stop the remaining workers and aborts the download.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			completedBytes += (part.End - part.Start + 1)
			parts[part.Index].CRC64 = part.CRC64
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
			publishProgress(listener, event)
			return err
		}

		// Redundant with the loop condition; kept as an extra safeguard.
		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
	publishProgress(listener, event)

	// Verify the combined CRC of all parts against the server's value.
	if enableCRC {
		actualCRC := combineCRCInParts(parts)
		err = checkDownloadCRC(actualCRC, expectedCRC)
		if err != nil {
			return err
		}
	}

	return os.Rename(tempFilePath, filePath)
}
+
// ----- concurrent download with checkpoint (resumable) -----

const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"

// downloadCheckpoint is the persisted state of a resumable download.
type downloadCheckpoint struct {
	Magic     string         // magic constant identifying the checkpoint format
	MD5       string         // MD5 of the checkpoint content (integrity check)
	FilePath  string         // local destination file
	Object    string         // object key
	ObjStat   objectStat     // remote object state when the download started
	Parts     []downloadPart // all parts
	PartStat  []bool         // whether each part has completed
	Start     int64          // range start
	End       int64          // range end
	enableCRC bool           // whether CRC verification is enabled
	CRC       uint64         // expected whole-object CRC value
}

// objectStat captures the remote object's identity for change detection.
type objectStat struct {
	Size         int64  // object size in bytes
	LastModified string // last modification time
	Etag         string // entity tag
}
+
+// CP数据是否有效,CP有效且Object没有更新时有效
+func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) {
+ // 比较CP的Magic及MD5
+ cpb := cp
+ cpb.MD5 = ""
+ js, _ := json.Marshal(cpb)
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+ if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
+ return false, nil
+ }
+
+ // 确认object没有更新
+ meta, err := bucket.GetObjectDetailedMeta(objectKey)
+ if err != nil {
+ return false, err
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return false, err
+ }
+
+ // 比较Object的大小/最后修改时间/etag
+ if cp.ObjStat.Size != objectSize ||
+ cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+ cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+ return false, nil
+ }
+
+ // 确认下载范围是否变化
+ if uRange != nil {
+ start, end := adjustRange(uRange, objectSize)
+ if start != cp.Start || end != cp.End {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// 从文件中load
+func (cp *downloadCheckpoint) load(filePath string) error {
+ contents, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(contents, cp)
+ return err
+}
+
+// dump到文件
+func (cp *downloadCheckpoint) dump(filePath string) error {
+ bcp := *cp
+
+ // 计算MD5
+ bcp.MD5 = ""
+ js, err := json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+ bcp.MD5 = b64
+
+ // 序列化
+ js, err = json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+
+ // dump
+ return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// 未完成的分片
+func (cp downloadCheckpoint) todoParts() []downloadPart {
+ dps := []downloadPart{}
+ for i, ps := range cp.PartStat {
+ if !ps {
+ dps = append(dps, cp.Parts[i])
+ }
+ }
+ return dps
+}
+
+// 完成的字节数
+func (cp downloadCheckpoint) getCompletedBytes() int64 {
+ var completedBytes int64
+ for i, part := range cp.Parts {
+ if cp.PartStat[i] {
+ completedBytes += (part.End - part.Start + 1)
+ }
+ }
+ return completedBytes
+}
+
+// 初始化下载任务
+func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+ // cp
+ cp.Magic = downloadCpMagic
+ cp.FilePath = filePath
+ cp.Object = objectKey
+
+ // object
+ meta, err := bucket.GetObjectDetailedMeta(objectKey)
+ if err != nil {
+ return err
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ cp.ObjStat.Size = objectSize
+ cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+ cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+ // parts
+ cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange)
+ if err != nil {
+ return err
+ }
+ cp.PartStat = make([]bool, len(cp.Parts))
+ for i := range cp.PartStat {
+ cp.PartStat[i] = false
+ }
+
+ return nil
+}
+
// complete finalizes a resumable download: it removes the checkpoint file
// (best effort; the error is deliberately ignored) and moves the temporary
// download file into its final location.
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
	os.Remove(cpFilePath)
	return os.Rename(downFilepath, cp.FilePath)
}
+
// downloadFileWithCp performs a concurrent, resumable download: progress is
// persisted to a checkpoint file after each completed part so an interrupted
// download can pick up where it left off.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := getProgressListener(options)

	// Load the checkpoint; a corrupt/unreadable file is removed and ignored.
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// If loading failed or the checkpoint no longer matches the remote
	// object (or the requested range), start over from a fresh checkpoint.
	valid, err := dcp.isValid(&bucket, objectKey, uRange)
	if err != nil || !valid {
		if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Create the file if absent; do not truncate an existing one — the part
	// writers overwrite their own byte ranges.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Only the parts that have not completed yet.
	parts := dcp.todoParts()
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := dcp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
	publishProgress(listener, event)

	// Start the worker goroutines.
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Feed the remaining parts to the workers concurrently.
	go downloadScheduler(jobs, parts)

	// Wait for the parts to finish, persisting the checkpoint after each
	// one; the first failure closes die to stop the remaining workers.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			dcp.PartStat[part.Index] = true
			dcp.Parts[part.Index].CRC64 = part.CRC64
			dcp.dump(cpFilePath)
			completedBytes += (part.End - part.Start + 1)
			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size)
			publishProgress(listener, event)
			return err
		}

		// Redundant with the loop condition; kept as an extra safeguard.
		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
	publishProgress(listener, event)

	// Verify the combined CRC of all parts against the checkpointed value.
	if dcp.enableCRC {
		actualCRC := combineCRCInParts(dcp.Parts)
		err = checkDownloadCRC(actualCRC, dcp.CRC)
		if err != nil {
			return err
		}
	}

	return dcp.complete(cpFilePath, tempFilePath)
}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
new file mode 100644
index 000000000000..a54b9d7650d9
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go
@@ -0,0 +1,89 @@
+package oss
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
// ServiceError contains fields of the error response from Oss Service REST API.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // error code returned by OSS
	Message    string   `xml:"Message"`   // detailed error message from OSS
	RequestID  string   `xml:"RequestId"` // UUID uniquely identifying the request
	HostID     string   `xml:"HostId"`    // identifies the OSS cluster that was accessed
	RawMessage string   // raw response payload returned by OSS
	StatusCode int      // HTTP status code
}

// Error implements the error interface.
func (e ServiceError) Error() string {
	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
		e.StatusCode, e.Code, e.Message, e.RequestID)
}
+
// UnexpectedStatusCodeError is returned when a storage service responds with
// neither an error nor an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // status codes that would have been accepted
	got     int   // status code actually returned by OSS
}

// Error implements the error interface.
func (e UnexpectedStatusCodeError) Error() string {
	format := func(code int) string {
		return fmt.Sprintf("%d %s", code, http.StatusText(code))
	}

	expected := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		expected = append(expected, format(code))
	}
	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
		format(e.got), strings.Join(expected, " or "))
}

// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// checkRespCode returns an UnexpectedStatusCodeError unless respCode is one
// of the allowed status codes.
func checkRespCode(respCode int, allowed []int) error {
	for _, code := range allowed {
		if code == respCode {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed: allowed, got: respCode}
}
+
// CRCCheckError is returned when the client- and server-side CRC-64 values
// of a transfer disagree.
type CRCCheckError struct {
	clientCRC uint64 // CRC computed by the client
	serverCRC uint64 // CRC reported by the server
	operation string // upload operation, e.g. PutObject/AppendObject/UploadPart
	requestID string // request ID of the operation
}

// Error implements the error interface.
func (e CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		e.operation, e.clientCRC, e.serverCRC, e.requestID)
}

// checkDownloadCRC compares the CRC computed over a downloaded file with the
// server-advertised value.
func checkDownloadCRC(clientCRC, serverCRC uint64) error {
	if clientCRC != serverCRC {
		return CRCCheckError{
			clientCRC: clientCRC,
			serverCRC: serverCRC,
			operation: "DownloadFile",
		}
	}
	return nil
}
+
+func checkCRC(resp *Response, operation string) error {
+ if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
+ return nil
+ }
+ return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
new file mode 100644
index 000000000000..e2ed9ce103e8
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
@@ -0,0 +1,245 @@
+package oss
+
+import (
+ "mime"
+ "path"
+ "strings"
+)
+
+// extToMimeType maps lowercased file extensions to MIME types. It serves as
+// a fallback for TypeByExtension when the platform's mime database does not
+// know the extension.
+var extToMimeType = map[string]string{
+ ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+ ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
+ ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
+ ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+ ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
+ ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
+ ".apk": "application/vnd.android.package-archive",
+ ".hqx": "application/mac-binhex40",
+ ".cpt": "application/mac-compactpro",
+ ".doc": "application/msword",
+ ".ogg": "application/ogg",
+ ".pdf": "application/pdf",
+ ".rtf": "text/rtf",
+ ".mif": "application/vnd.mif",
+ ".xls": "application/vnd.ms-excel",
+ ".ppt": "application/vnd.ms-powerpoint",
+ ".odc": "application/vnd.oasis.opendocument.chart",
+ ".odb": "application/vnd.oasis.opendocument.database",
+ ".odf": "application/vnd.oasis.opendocument.formula",
+ ".odg": "application/vnd.oasis.opendocument.graphics",
+ ".otg": "application/vnd.oasis.opendocument.graphics-template",
+ ".odi": "application/vnd.oasis.opendocument.image",
+ ".odp": "application/vnd.oasis.opendocument.presentation",
+ ".otp": "application/vnd.oasis.opendocument.presentation-template",
+ ".ods": "application/vnd.oasis.opendocument.spreadsheet",
+ ".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
+ ".odt": "application/vnd.oasis.opendocument.text",
+ ".odm": "application/vnd.oasis.opendocument.text-master",
+ ".ott": "application/vnd.oasis.opendocument.text-template",
+ ".oth": "application/vnd.oasis.opendocument.text-web",
+ ".sxw": "application/vnd.sun.xml.writer",
+ ".stw": "application/vnd.sun.xml.writer.template",
+ ".sxc": "application/vnd.sun.xml.calc",
+ ".stc": "application/vnd.sun.xml.calc.template",
+ ".sxd": "application/vnd.sun.xml.draw",
+ ".std": "application/vnd.sun.xml.draw.template",
+ ".sxi": "application/vnd.sun.xml.impress",
+ ".sti": "application/vnd.sun.xml.impress.template",
+ ".sxg": "application/vnd.sun.xml.writer.global",
+ ".sxm": "application/vnd.sun.xml.math",
+ ".sis": "application/vnd.symbian.install",
+ ".wbxml": "application/vnd.wap.wbxml",
+ ".wmlc": "application/vnd.wap.wmlc",
+ ".wmlsc": "application/vnd.wap.wmlscriptc",
+ ".bcpio": "application/x-bcpio",
+ ".torrent": "application/x-bittorrent",
+ ".bz2": "application/x-bzip2",
+ ".vcd": "application/x-cdlink",
+ ".pgn": "application/x-chess-pgn",
+ ".cpio": "application/x-cpio",
+ ".csh": "application/x-csh",
+ ".dvi": "application/x-dvi",
+ ".spl": "application/x-futuresplash",
+ ".gtar": "application/x-gtar",
+ ".hdf": "application/x-hdf",
+ ".jar": "application/x-java-archive",
+ ".jnlp": "application/x-java-jnlp-file",
+ ".js": "application/x-javascript",
+ ".ksp": "application/x-kspread",
+ ".chrt": "application/x-kchart",
+ ".kil": "application/x-killustrator",
+ ".latex": "application/x-latex",
+ ".rpm": "application/x-rpm",
+ ".sh": "application/x-sh",
+ ".shar": "application/x-shar",
+ ".swf": "application/x-shockwave-flash",
+ ".sit": "application/x-stuffit",
+ ".sv4cpio": "application/x-sv4cpio",
+ ".sv4crc": "application/x-sv4crc",
+ ".tar": "application/x-tar",
+ ".tcl": "application/x-tcl",
+ ".tex": "application/x-tex",
+ ".man": "application/x-troff-man",
+ ".me": "application/x-troff-me",
+ ".ms": "application/x-troff-ms",
+ ".ustar": "application/x-ustar",
+ ".src": "application/x-wais-source",
+ ".zip": "application/zip",
+ ".m3u": "audio/x-mpegurl",
+ ".ra": "audio/x-pn-realaudio",
+ ".wav": "audio/x-wav",
+ ".wma": "audio/x-ms-wma",
+ ".wax": "audio/x-ms-wax",
+ ".pdb": "chemical/x-pdb",
+ ".xyz": "chemical/x-xyz",
+ ".bmp": "image/bmp",
+ ".gif": "image/gif",
+ ".ief": "image/ief",
+ ".png": "image/png",
+ ".wbmp": "image/vnd.wap.wbmp",
+ ".ras": "image/x-cmu-raster",
+ ".pnm": "image/x-portable-anymap",
+ ".pbm": "image/x-portable-bitmap",
+ ".pgm": "image/x-portable-graymap",
+ ".ppm": "image/x-portable-pixmap",
+ ".rgb": "image/x-rgb",
+ ".xbm": "image/x-xbitmap",
+ ".xpm": "image/x-xpixmap",
+ ".xwd": "image/x-xwindowdump",
+ ".css": "text/css",
+ ".rtx": "text/richtext",
+ ".tsv": "text/tab-separated-values",
+ ".jad": "text/vnd.sun.j2me.app-descriptor",
+ ".wml": "text/vnd.wap.wml",
+ ".wmls": "text/vnd.wap.wmlscript",
+ ".etx": "text/x-setext",
+ ".mxu": "video/vnd.mpegurl",
+ ".flv": "video/x-flv",
+ ".wm": "video/x-ms-wm",
+ ".wmv": "video/x-ms-wmv",
+ ".wmx": "video/x-ms-wmx",
+ ".wvx": "video/x-ms-wvx",
+ ".avi": "video/x-msvideo",
+ ".movie": "video/x-sgi-movie",
+ ".ice": "x-conference/x-cooltalk",
+ ".3gp": "video/3gpp",
+ ".ai": "application/postscript",
+ ".aif": "audio/x-aiff",
+ ".aifc": "audio/x-aiff",
+ ".aiff": "audio/x-aiff",
+ ".asc": "text/plain",
+ ".atom": "application/atom+xml",
+ ".au": "audio/basic",
+ ".bin": "application/octet-stream",
+ ".cdf": "application/x-netcdf",
+ ".cgm": "image/cgm",
+ ".class": "application/octet-stream",
+ ".dcr": "application/x-director",
+ ".dif": "video/x-dv",
+ ".dir": "application/x-director",
+ ".djv": "image/vnd.djvu",
+ ".djvu": "image/vnd.djvu",
+ ".dll": "application/octet-stream",
+ ".dmg": "application/octet-stream",
+ ".dms": "application/octet-stream",
+ ".dtd": "application/xml-dtd",
+ ".dv": "video/x-dv",
+ ".dxr": "application/x-director",
+ ".eps": "application/postscript",
+ ".exe": "application/octet-stream",
+ ".ez": "application/andrew-inset",
+ ".gram": "application/srgs",
+ ".grxml": "application/srgs+xml",
+ ".gz": "application/x-gzip",
+ ".htm": "text/html",
+ ".html": "text/html",
+ ".ico": "image/x-icon",
+ ".ics": "text/calendar",
+ ".ifb": "text/calendar",
+ ".iges": "model/iges",
+ ".igs": "model/iges",
+ ".jp2": "image/jp2",
+ ".jpe": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".jpg": "image/jpeg",
+ ".kar": "audio/midi",
+ ".lha": "application/octet-stream",
+ ".lzh": "application/octet-stream",
+ ".m4a": "audio/mp4a-latm",
+ ".m4p": "audio/mp4a-latm",
+ ".m4u": "video/vnd.mpegurl",
+ ".m4v": "video/x-m4v",
+ ".mac": "image/x-macpaint",
+ ".mathml": "application/mathml+xml",
+ ".mesh": "model/mesh",
+ ".mid": "audio/midi",
+ ".midi": "audio/midi",
+ ".mov": "video/quicktime",
+ ".mp2": "audio/mpeg",
+ ".mp3": "audio/mpeg",
+ ".mp4": "video/mp4",
+ ".mpe": "video/mpeg",
+ ".mpeg": "video/mpeg",
+ ".mpg": "video/mpeg",
+ ".mpga": "audio/mpeg",
+ ".msh": "model/mesh",
+ ".nc": "application/x-netcdf",
+ ".oda": "application/oda",
+ ".ogv": "video/ogv",
+ ".pct": "image/pict",
+ ".pic": "image/pict",
+ ".pict": "image/pict",
+ ".pnt": "image/x-macpaint",
+ ".pntg": "image/x-macpaint",
+ ".ps": "application/postscript",
+ ".qt": "video/quicktime",
+ ".qti": "image/x-quicktime",
+ ".qtif": "image/x-quicktime",
+ ".ram": "audio/x-pn-realaudio",
+ ".rdf": "application/rdf+xml",
+ ".rm": "application/vnd.rn-realmedia",
+ ".roff": "application/x-troff",
+ ".sgm": "text/sgml",
+ ".sgml": "text/sgml",
+ ".silo": "model/mesh",
+ ".skd": "application/x-koan",
+ ".skm": "application/x-koan",
+ ".skp": "application/x-koan",
+ ".skt": "application/x-koan",
+ ".smi": "application/smil",
+ ".smil": "application/smil",
+ ".snd": "audio/basic",
+ ".so": "application/octet-stream",
+ ".svg": "image/svg+xml",
+ ".t": "application/x-troff",
+ ".texi": "application/x-texinfo",
+ ".texinfo": "application/x-texinfo",
+ ".tif": "image/tiff",
+ ".tiff": "image/tiff",
+ ".tr": "application/x-troff",
+ ".txt": "text/plain",
+ ".vrml": "model/vrml",
+ ".vxml": "application/voicexml+xml",
+ ".webm": "video/webm",
+ ".wrl": "model/vrml",
+ ".xht": "application/xhtml+xml",
+ ".xhtml": "application/xhtml+xml",
+ ".xml": "application/xml",
+ ".xsl": "application/xml",
+ ".xslt": "application/xslt+xml",
+ ".xul": "application/vnd.mozilla.xul+xml",
+}
+
+// TypeByExtension returns the MIME type associated with the extension of
+// filePath. The platform's mime database is consulted first; the package's
+// built-in table serves as a fallback. The empty string is returned for
+// unknown extensions.
+func TypeByExtension(filePath string) string {
+ ext := path.Ext(filePath)
+ if typ := mime.TypeByExtension(ext); typ != "" {
+ return typ
+ }
+ return extToMimeType[strings.ToLower(ext)]
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 000000000000..7c71b0181866
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,60 @@
+package oss
+
+import (
+ "hash"
+ "io"
+ "net/http"
+)
+
+// Response wraps an HTTP response from OSS, including the CRC64 values used
+// for end-to-end data integrity checks.
+type Response struct {
+ StatusCode int
+ Headers http.Header
+ Body io.ReadCloser
+ ClientCRC uint64
+ ServerCRC uint64
+}
+
+// PutObjectRequest is the request of DoPutObject.
+type PutObjectRequest struct {
+ ObjectKey string
+ Reader io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject.
+type GetObjectRequest struct {
+ ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject.
+type GetObjectResult struct {
+ Response *Response
+ ClientCRC hash.Hash64
+ ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject.
+type AppendObjectRequest struct {
+ ObjectKey string
+ Reader io.Reader
+ Position int64
+}
+
+// AppendObjectResult is the result of DoAppendObject.
+type AppendObjectResult struct {
+ NextPosition int64
+ CRC uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart.
+type UploadPartRequest struct {
+ InitResult *InitiateMultipartUploadResult
+ Reader io.Reader
+ PartSize int64
+ PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart.
+type UploadPartResult struct {
+ Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 000000000000..a33b48870ef9
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,461 @@
+package oss
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+//
+// CopyFile copies an object in parts (multipart copy).
+//
+// srcBucketName    source bucket name.
+// srcObjectKey     source object key.
+// destObjectKey    destination object key; the destination bucket is Bucket.BucketName.
+// partSize         byte size of each copied part, e.g. 100 * 1024 copies in 100KB
+//                  parts; must lie in (MinPartSize, MaxPartSize], i.e. (1024KB, 5GB].
+// options          constraints on the destination object; see InitiateMultipartUpload.
+//
+// error    nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
+ destBucketName := bucket.BucketName
+ if partSize < MinPartSize || partSize > MaxPartSize {
+ return errors.New("oss: part size invalid range (1024KB, 5GB]")
+ }
+
+ // Resolve the checkpoint (resume) configuration from the options.
+ cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
+ if err != nil {
+ return err
+ }
+
+ routines := getRoutines(options)
+
+ // With checkpointing enabled the copy can be resumed after interruption.
+ if cpConf.IsEnable {
+ return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+ partSize, options, cpConf.FilePath, routines)
+ }
+
+ return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+ partSize, options, routines)
+}
+
+// ----- concurrent copy without checkpoint -----
+
+// copyWorkerArg bundles the arguments shared by all copy worker goroutines.
+type copyWorkerArg struct {
+ bucket *Bucket
+ imur InitiateMultipartUploadResult
+ srcBucketName string
+ srcObjectKey string
+ options []Option
+ hook copyPartHook
+}
+
+// copyPartHook is invoked before each part copy; tests replace copyPartHooker
+// to inject failures.
+type copyPartHook func(part copyPart) error
+
+var copyPartHooker copyPartHook = defaultCopyPartHook
+
+// defaultCopyPartHook is the production hook: a no-op.
+func defaultCopyPartHook(part copyPart) error {
+ return nil
+}
+
+// copyWorker is a worker goroutine: it copies the parts received on jobs via
+// UploadPartCopy, sends successful parts on results and the first error on
+// failed (then stops). A closed die channel makes the worker return without
+// publishing further results.
+func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+ for chunk := range jobs {
+ if err := arg.hook(chunk); err != nil {
+ failed <- err
+ break
+ }
+ chunkSize := chunk.End - chunk.Start + 1
+ part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
+ chunk.Start, chunkSize, chunk.Number, arg.options...)
+ if err != nil {
+ failed <- err
+ break
+ }
+ // Check for abort before publishing, so an aborted task stops promptly.
+ select {
+ case <-die:
+ return
+ default:
+ }
+ results <- part
+ }
+}
+
+// copyScheduler hands every part to the workers through jobs, then closes
+// the channel so that idle workers terminate.
+func copyScheduler(jobs chan copyPart, parts []copyPart) {
+ for i := range parts {
+ jobs <- parts[i]
+ }
+ close(jobs)
+}
+
+// copyPart describes one slice of the source object.
+type copyPart struct {
+ Number int // part number, in [1, 10000]
+ Start int64 // offset of the first byte of the part
+ End int64 // offset of the last byte of the part (inclusive)
+}
+
+// getCopyParts splits the source object into partSize-sized parts based on
+// its Content-Length metadata; the final part may be shorter. Part numbers
+// start at 1 and follow object order.
+func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
+ meta, err := bucket.GetObjectDetailedMeta(objectKey)
+ if err != nil {
+ return nil, err
+ }
+
+ parts := []copyPart{}
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ part := copyPart{}
+ i := 0
+ for offset := int64(0); offset < objectSize; offset += partSize {
+ part.Number = i + 1
+ part.Start = offset
+ part.End = GetPartEnd(offset, objectSize, partSize)
+ parts = append(parts, part)
+ i++
+ }
+ return parts, nil
+}
+
+// getSrcObjectBytes returns the total number of bytes covered by parts.
+func getSrcObjectBytes(parts []copyPart) int64 {
+ var total int64
+ for i := range parts {
+ total += parts[i].End - parts[i].Start + 1
+ }
+ return total
+}
+
+// copyFile copies the source object concurrently, without checkpoint/resume
+// support: the source is split into partSize parts, `routines` worker
+// goroutines copy them via UploadPartCopy, and the multipart upload is then
+// completed. On any part failure the multipart upload is aborted and the
+// first error is returned.
+func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+ partSize int64, options []Option, routines int) error {
+ // The original code discarded both Client.Bucket errors; check them.
+ descBucket, err := bucket.Client.Bucket(destBucketName)
+ if err != nil {
+ return err
+ }
+ srcBucket, err := bucket.Client.Bucket(srcBucketName)
+ if err != nil {
+ return err
+ }
+ listener := getProgressListener(options)
+
+ // Split the source object into parts.
+ parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
+ if err != nil {
+ return err
+ }
+
+ // Initiate the multipart upload on the destination.
+ imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
+ if err != nil {
+ return err
+ }
+
+ jobs := make(chan copyPart, len(parts))
+ results := make(chan UploadPart, len(parts))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ var completedBytes int64
+ totalBytes := getSrcObjectBytes(parts)
+ event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+ publishProgress(listener, event)
+
+ // Start the worker goroutines.
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+ for w := 1; w <= routines; w++ {
+ go copyWorker(w, arg, jobs, results, failed, die)
+ }
+
+ // Feed the parts to the workers.
+ go copyScheduler(jobs, parts)
+
+ // Wait for every part, bailing out on the first error.
+ completed := 0
+ ups := make([]UploadPart, len(parts))
+ for completed < len(parts) {
+ select {
+ case part := <-results:
+ completed++
+ ups[part.PartNumber-1] = part
+ completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+ event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+ publishProgress(listener, event)
+ case err := <-failed:
+ close(die)
+ descBucket.AbortMultipartUpload(imur)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+ publishProgress(listener, event)
+ return err
+ }
+ }
+
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+ publishProgress(listener, event)
+
+ // Commit the copied parts.
+ _, err = descBucket.CompleteMultipartUpload(imur, ups)
+ if err != nil {
+ bucket.AbortMultipartUpload(imur)
+ return err
+ }
+ return nil
+}
+
+// ----- concurrent copy with checkpoint (resume) -----
+
+// copyCpMagic identifies a copy checkpoint file.
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+// copyCheckpoint is the persistent state of a resumable copy, dumped to disk
+// after every completed part.
+type copyCheckpoint struct {
+ Magic string // magic marker for copy checkpoint files
+ MD5 string // MD5 of the checkpoint content (computed with this field blanked)
+ SrcBucketName string // source bucket
+ SrcObjectKey string // source object
+ DestBucketName string // destination bucket
+ DestObjectKey string // destination object (original comment wrongly said "bucket")
+ CopyID string // copy (multipart upload) id
+ ObjStat objectStat // source object state when the copy started
+ Parts []copyPart // all parts
+ CopyParts []UploadPart // results of the successfully copied parts
+ PartStat []bool // whether each part has completed
+}
+
+// isValid reports whether the loaded checkpoint can be trusted: its magic and
+// content MD5 must match, and the source object must not have changed since
+// the checkpoint was written.
+func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
+ // Recompute the content MD5 with the MD5 field blanked, mirroring dump().
+ cpb := cp
+ cpb.MD5 = ""
+ js, _ := json.Marshal(cpb)
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+ // prepare() stamps copy checkpoints with copyCpMagic, so that is the magic
+ // to compare against. (The original compared against downloadCpMagic, which
+ // rejected every copy checkpoint and silently defeated resuming.)
+ if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+ return false, nil
+ }
+
+ // Confirm the source object has not been updated.
+ meta, err := bucket.GetObjectDetailedMeta(objectKey)
+ if err != nil {
+ return false, err
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return false, err
+ }
+
+ // Compare size / last-modified time / etag.
+ if cp.ObjStat.Size != objectSize ||
+ cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+ cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// load reads a serialized checkpoint from filePath into cp.
+func (cp *copyCheckpoint) load(filePath string) error {
+ contents, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(contents, cp)
+}
+
+// update marks part as completed and records its upload result.
+func (cp *copyCheckpoint) update(part UploadPart) {
+ idx := part.PartNumber - 1
+ cp.CopyParts[idx] = part
+ cp.PartStat[idx] = true
+}
+
+// dump serializes the checkpoint to filePath, embedding an MD5 of the content
+// so that isValid can later detect corruption.
+func (cp *copyCheckpoint) dump(filePath string) error {
+ bcp := *cp
+
+ // Compute the MD5 over the content with the MD5 field blanked.
+ bcp.MD5 = ""
+ js, err := json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+ sum := md5.Sum(js)
+ b64 := base64.StdEncoding.EncodeToString(sum[:])
+ bcp.MD5 = b64
+
+ // Serialize again, now including the MD5.
+ js, err = json.Marshal(bcp)
+ if err != nil {
+ return err
+ }
+
+ // Write to disk.
+ return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts returns the parts that still need to be copied.
+func (cp copyCheckpoint) todoParts() []copyPart {
+ remaining := []copyPart{}
+ for i, done := range cp.PartStat {
+ if !done {
+ remaining = append(remaining, cp.Parts[i])
+ }
+ }
+ return remaining
+}
+
+// getCompletedBytes sums the sizes of the parts already copied.
+func (cp copyCheckpoint) getCompletedBytes() int64 {
+ var done int64
+ for i := range cp.Parts {
+ if cp.PartStat[i] {
+ done += cp.Parts[i].End - cp.Parts[i].Start + 1
+ }
+ }
+ return done
+}
+
+// prepare initializes the checkpoint for a fresh copy: it records the source
+// object's size/last-modified/etag, computes the part list and initiates the
+// multipart upload on the destination.
+func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+ partSize int64, options []Option) error {
+ // checkpoint identity
+ cp.Magic = copyCpMagic
+ cp.SrcBucketName = srcBucket.BucketName
+ cp.SrcObjectKey = srcObjectKey
+ cp.DestBucketName = destBucket.BucketName
+ cp.DestObjectKey = destObjectKey
+
+ // source object state
+ meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
+ if err != nil {
+ return err
+ }
+
+ objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ cp.ObjStat.Size = objectSize
+ cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+ cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+ // part list, with nothing completed yet
+ cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
+ if err != nil {
+ return err
+ }
+ cp.PartStat = make([]bool, len(cp.Parts))
+ for i := range cp.PartStat {
+ cp.PartStat[i] = false
+ }
+ cp.CopyParts = make([]UploadPart, len(cp.Parts))
+
+ // initiate the multipart upload on the destination
+ imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
+ if err != nil {
+ return err
+ }
+ cp.CopyID = imur.UploadID
+
+ return nil
+}
+
+// complete commits the copied parts and removes the checkpoint file on
+// success (a failed removal is best-effort and not reported).
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+ imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
+ Key: cp.DestObjectKey, UploadID: cp.CopyID}
+ if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ return nil
+}
+
+// copyFileWithCp copies the source object concurrently with checkpoint
+// (resume) support: progress is persisted to cpFilePath after every part so
+// that an interrupted copy can pick up where it left off.
+func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+ partSize int64, options []Option, cpFilePath string, routines int) error {
+ // The original code discarded both Client.Bucket errors; check them.
+ descBucket, err := bucket.Client.Bucket(destBucketName)
+ if err != nil {
+ return err
+ }
+ srcBucket, err := bucket.Client.Bucket(srcBucketName)
+ if err != nil {
+ return err
+ }
+ listener := getProgressListener(options)
+
+ // Load the checkpoint; a broken file is discarded.
+ ccp := copyCheckpoint{}
+ err = ccp.load(cpFilePath)
+ if err != nil {
+ os.Remove(cpFilePath)
+ }
+
+ // If loading failed or the data is stale, start a fresh copy.
+ valid, err := ccp.isValid(srcBucket, srcObjectKey)
+ if err != nil || !valid {
+ if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+ return err
+ }
+ os.Remove(cpFilePath)
+ }
+
+ // Parts still to copy.
+ parts := ccp.todoParts()
+ imur := InitiateMultipartUploadResult{
+ Bucket: destBucketName,
+ Key: destObjectKey,
+ UploadID: ccp.CopyID}
+
+ jobs := make(chan copyPart, len(parts))
+ results := make(chan UploadPart, len(parts))
+ failed := make(chan error)
+ die := make(chan bool)
+
+ completedBytes := ccp.getCompletedBytes()
+ event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
+ publishProgress(listener, event)
+
+ // Start the worker goroutines.
+ arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+ for w := 1; w <= routines; w++ {
+ go copyWorker(w, arg, jobs, results, failed, die)
+ }
+
+ // Feed the remaining parts to the workers.
+ go copyScheduler(jobs, parts)
+
+ // Wait for the parts, checkpointing after each one.
+ completed := 0
+ for completed < len(parts) {
+ select {
+ case part := <-results:
+ completed++
+ ccp.update(part)
+ ccp.dump(cpFilePath)
+ // Index the FULL part list by part number. The original indexed the
+ // todo subset (parts), which is wrong — and out of range — when
+ // resuming after some parts were already done.
+ completedBytes += (ccp.Parts[part.PartNumber-1].End - ccp.Parts[part.PartNumber-1].Start + 1)
+ event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
+ publishProgress(listener, event)
+ case err := <-failed:
+ close(die)
+ event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
+ publishProgress(listener, event)
+ return err
+ }
+ }
+
+ event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
+ publishProgress(listener, event)
+
+ return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
new file mode 100644
index 000000000000..de8ea8fde776
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
@@ -0,0 +1,291 @@
+package oss
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+)
+
+//
+// InitiateMultipartUpload initiates a multipart upload task.
+//
+// objectKey    object key.
+// options      properties of the object to create: CacheControl,
+//              ContentDisposition, ContentEncoding, Expires,
+//              ServerSideEncryption, Meta. See
+//              https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
+//
+// InitiateMultipartUploadResult    the initiated task, passed to subsequent
+//                                  UploadPartFromFile/UploadPartCopy calls;
+//                                  valid when error is nil.
+// error    nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
+ var imur InitiateMultipartUploadResult
+ opts := addContentType(options, objectKey)
+ params := map[string]interface{}{}
+ params["uploads"] = nil
+ resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
+ if err != nil {
+ return imur, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &imur)
+ return imur, err
+}
+
+//
+// UploadPart uploads one part of a multipart upload.
+//
+// After a multipart upload is initiated, data is uploaded in parts identified
+// by a part number in the range 1-10000. For a given upload ID the number
+// both identifies the part and fixes its position within the object;
+// re-uploading with an existing number overwrites that part. Except for the
+// last one, parts must be at least 100KB; the last part has no minimum size.
+//
+// imur          result returned by InitiateMultipartUpload.
+// reader        io.Reader providing this part's data.
+// size          byte size of this part.
+// partNumber    part number in [1, 10000]; out-of-range numbers make OSS
+//               return InvalidArgument.
+//
+// UploadPart    the uploaded part: PartNumber (as passed in) and ETag (the
+//               MD5 of the data); valid when error is nil.
+// error         nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
+ partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+ request := &UploadPartRequest{
+ InitResult: &imur,
+ Reader: reader,
+ PartSize: partSize,
+ PartNumber: partNumber,
+ }
+
+ result, err := bucket.DoUploadPart(request, options)
+
+ return result.Part, err
+}
+
+//
+// UploadPartFromFile uploads one part of a multipart upload from a local file.
+//
+// imur             result returned by InitiateMultipartUpload.
+// filePath         local file to read this part from.
+// startPosition    offset in the file at which this part starts.
+// partSize         byte size of this part.
+// partNumber       part number in [1, 10000].
+//
+// UploadPart    the uploaded part: PartNumber (as passed in) and ETag (the
+//               MD5 of the data); valid when error is nil.
+// error         nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
+ startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+ var part = UploadPart{}
+ fd, err := os.Open(filePath)
+ if err != nil {
+ return part, err
+ }
+ defer fd.Close()
+ // A Seek failure must not be ignored: silently continuing would upload the
+ // wrong bytes for this part.
+ if _, err = fd.Seek(startPosition, os.SEEK_SET); err != nil {
+ return part, err
+ }
+
+ request := &UploadPartRequest{
+ InitResult: &imur,
+ Reader: fd,
+ PartSize: partSize,
+ PartNumber: partNumber,
+ }
+
+ result, err := bucket.DoUploadPart(request, options)
+
+ return result.Part, err
+}
+
+//
+// DoUploadPart uploads one part as described by request.
+//
+// request    the part-upload request.
+//
+// UploadPartResult    the result of the part upload.
+// error               nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
+ listener := getProgressListener(options)
+ opts := []Option{ContentLength(request.PartSize)}
+ params := map[string]interface{}{}
+ params["partNumber"] = strconv.Itoa(request.PartNumber)
+ params["uploadId"] = request.InitResult.UploadID
+ resp, err := bucket.do("PUT", request.InitResult.Key, params, opts,
+ &io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
+ if err != nil {
+ return &UploadPartResult{}, err
+ }
+ defer resp.Body.Close()
+
+ part := UploadPart{
+ ETag: resp.Headers.Get(HTTPHeaderEtag),
+ PartNumber: request.PartNumber,
+ }
+
+ // Verify the end-to-end CRC when the client configuration enables it.
+ if bucket.getConfig().IsEnableCRC {
+ err = checkCRC(resp, "DoUploadPart")
+ if err != nil {
+ return &UploadPartResult{part}, err
+ }
+ }
+
+ return &UploadPartResult{part}, nil
+}
+
+//
+// UploadPartCopy copies one part from an existing object.
+//
+// imur             result returned by InitiateMultipartUpload.
+// srcBucketName    source bucket name.
+// srcObjectKey     source object key.
+// startPosition    offset in the source object at which this part starts.
+// partSize         byte size of this part.
+// partNumber       part number in [1, 10000]; out-of-range numbers make OSS
+//                  return InvalidArgument.
+// options          preconditions on the source object; the copy happens only
+//                  if they hold. Available: CopySourceIfMatch,
+//                  CopySourceIfNoneMatch, CopySourceIfModifiedSince,
+//                  CopySourceIfUnmodifiedSince. See
+//                  https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+//
+// UploadPart    the copied part: PartNumber (as passed in) and ETag; valid
+//               when error is nil.
+// error         nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
+ startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+ var out UploadPartCopyResult
+ var part UploadPart
+
+ opts := []Option{CopySource(srcBucketName, srcObjectKey),
+ CopySourceRange(startPosition, partSize)}
+ opts = append(opts, options...)
+ params := map[string]interface{}{}
+ params["partNumber"] = strconv.Itoa(partNumber)
+ params["uploadId"] = imur.UploadID
+ resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
+ if err != nil {
+ return part, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ if err != nil {
+ return part, err
+ }
+ part.ETag = out.ETag
+ part.PartNumber = partNumber
+
+ return part, nil
+}
+
+//
+// CompleteMultipartUpload completes a multipart upload task from the parts
+// returned by UploadPart/UploadPartFromFile/UploadPartCopy.
+//
+// imur     result returned by InitiateMultipartUpload.
+// parts    the uploaded parts.
+//
+// CompleteMultipartUploadResult    the result; valid when error is nil.
+// error                            nil on success, otherwise the error
+//                                  encountered.
+//
+func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
+ parts []UploadPart) (CompleteMultipartUploadResult, error) {
+ var out CompleteMultipartUploadResult
+
+ // The part list is sorted by part number before being marshalled.
+ sort.Sort(uploadParts(parts))
+ cxml := completeMultipartUploadXML{}
+ cxml.Part = parts
+ bs, err := xml.Marshal(cxml)
+ if err != nil {
+ return out, err
+ }
+ buffer := new(bytes.Buffer)
+ buffer.Write(bs)
+
+ params := map[string]interface{}{}
+ params["uploadId"] = imur.UploadID
+ resp, err := bucket.do("POST", imur.Key, params, nil, buffer, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// AbortMultipartUpload aborts a multipart upload task.
+//
+// imur    result returned by InitiateMultipartUpload.
+//
+// error    nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
+ params := map[string]interface{}{}
+ params["uploadId"] = imur.UploadID
+ resp, err := bucket.do("DELETE", imur.Key, params, nil, nil, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// ListUploadedParts lists the parts already uploaded (or copied) for a
+// multipart upload task.
+//
+// imur    result returned by InitiateMultipartUpload.
+//
+// ListUploadedPartsResult    the uploaded/copied parts; valid when error is
+//                            nil.
+// error                      nil on success, otherwise the error encountered.
+//
+func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
+ var out ListUploadedPartsResult
+ params := map[string]interface{}{}
+ params["uploadId"] = imur.UploadID
+ resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ return out, err
+}
+
+//
+// ListMultipartUploads lists all multipart upload tasks that have not yet
+// been completed or aborted.
+//
+// options    filters, as for ListObjects: Prefix restricts keys to a prefix,
+//            KeyMarker sets the start position, MaxUploads caps the number of
+//            entries (default 1000), Delimiter groups keys by the first
+//            occurrence of the given character.
+//
+// ListMultipartUploadResult    the matching tasks; valid when error is nil.
+// error                        nil on success, otherwise the error
+//                              encountered.
+//
+func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
+ var out ListMultipartUploadResult
+
+ options = append(options, EncodingType("url"))
+ params, err := getRawParams(options)
+ if err != nil {
+ return out, err
+ }
+ params["uploads"] = nil
+
+ resp, err := bucket.do("GET", "", params, nil, nil, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.Body.Close()
+
+ err = xmlUnmarshal(resp.Body, &out)
+ if err != nil {
+ return out, err
+ }
+ err = decodeListMultipartUploadResult(&out)
+ return out, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
new file mode 100644
index 000000000000..f0e613b8b3c0
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
@@ -0,0 +1,386 @@
+package oss
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type optionType string
+
+const (
+ optionParam optionType = "HTTPParameter" // URL参数
+ optionHTTP optionType = "HTTPHeader" // HTTP头
+ optionArg optionType = "FuncArgument" // 函数参数
+)
+
+const (
+ deleteObjectsQuiet = "delete-objects-quiet"
+ routineNum = "x-routine-num"
+ checkpointConfig = "x-cp-config"
+ initCRC64 = "init-crc64"
+ progressListener = "x-progress-listener"
+ storageClass = "storage-class"
+)
+
+type (
+ optionValue struct {
+ Value interface{}
+ Type optionType
+ }
+
+ // Option http option
+ Option func(map[string]optionValue) error
+)
+
+// ACL is an option to set X-Oss-Acl header
+func ACL(acl ACLType) Option {
+ return setHeader(HTTPHeaderOssACL, string(acl))
+}
+
+// ContentType is an option to set Content-Type header
+func ContentType(value string) Option {
+ return setHeader(HTTPHeaderContentType, value)
+}
+
+// ContentLength is an option to set Content-Length header
+func ContentLength(length int64) Option {
+ return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
+}
+
+// CacheControl is an option to set Cache-Control header
+func CacheControl(value string) Option {
+ return setHeader(HTTPHeaderCacheControl, value)
+}
+
+// ContentDisposition is an option to set Content-Disposition header
+func ContentDisposition(value string) Option {
+ return setHeader(HTTPHeaderContentDisposition, value)
+}
+
+// ContentEncoding is an option to set Content-Encoding header
+func ContentEncoding(value string) Option {
+ return setHeader(HTTPHeaderContentEncoding, value)
+}
+
+// ContentMD5 is an option to set Content-MD5 header
+func ContentMD5(value string) Option {
+ return setHeader(HTTPHeaderContentMD5, value)
+}
+
+// Expires is an option to set Expires header
+func Expires(t time.Time) Option {
+ return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
+}
+
+// Meta is an option to set Meta header
+func Meta(key, value string) Option {
+ return setHeader(HTTPHeaderOssMetaPrefix+key, value)
+}
+
+// Range is an option to set Range header, [start, end]
+func Range(start, end int64) Option {
+ return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
+}
+
+// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
+func NormalizedRange(nr string) Option {
+ return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
+}
+
+// AcceptEncoding is an option to set Accept-Encoding header
+func AcceptEncoding(value string) Option {
+ return setHeader(HTTPHeaderAcceptEncoding, value)
+}
+
+// IfModifiedSince is an option to set If-Modified-Since header
+func IfModifiedSince(t time.Time) Option {
+ return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfUnmodifiedSince is an option to set If-Unmodified-Since header
+func IfUnmodifiedSince(t time.Time) Option {
+ return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfMatch is an option to set If-Match header
+func IfMatch(value string) Option {
+ return setHeader(HTTPHeaderIfMatch, value)
+}
+
+// IfNoneMatch is an option to set IfNoneMatch header
+func IfNoneMatch(value string) Option {
+ return setHeader(HTTPHeaderIfNoneMatch, value)
+}
+
+// CopySource is an option to set X-Oss-Copy-Source header
+func CopySource(sourceBucket, sourceObject string) Option {
+ return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
+}
+
+// CopySourceRange is an option to set X-Oss-Copy-Source-Range header
+func CopySourceRange(startPosition, partSize int64) Option {
+ val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
+ strconv.FormatInt((startPosition+partSize-1), 10)
+ return setHeader(HTTPHeaderOssCopySourceRange, val)
+}
+
+// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
+func CopySourceIfMatch(value string) Option {
+ return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
+}
+
+// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
+func CopySourceIfNoneMatch(value string) Option {
+ return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
+}
+
+// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header
+func CopySourceIfModifiedSince(t time.Time) Option {
+ return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
+func CopySourceIfUnmodifiedSince(t time.Time) Option {
+ return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// MetadataDirective is an option to set X-Oss-Metadata-Directive header
+func MetadataDirective(directive MetadataDirectiveType) Option {
+ return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
+}
+
+// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
+func ServerSideEncryption(value string) Option {
+ return setHeader(HTTPHeaderOssServerSideEncryption, value)
+}
+
+// ObjectACL is an option to set X-Oss-Object-Acl header
+func ObjectACL(acl ACLType) Option {
+ return setHeader(HTTPHeaderOssObjectACL, string(acl))
+}
+
+// symlinkTarget is an option to set X-Oss-Symlink-Target
+func symlinkTarget(targetObjectKey string) Option {
+ return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+}
+
+// Origin is an option to set Origin header
+func Origin(value string) Option {
+ return setHeader(HTTPHeaderOrigin, value)
+}
+
+// Delimiter is an option to set delimiter parameter
+func Delimiter(value string) Option {
+ return addParam("delimiter", value)
+}
+
+// Marker is an option to set marker parameter
+func Marker(value string) Option {
+ return addParam("marker", value)
+}
+
+// MaxKeys is an option to set max-keys parameter
+func MaxKeys(value int) Option {
+ return addParam("max-keys", strconv.Itoa(value))
+}
+
+// Prefix is an option to set prefix parameter
+func Prefix(value string) Option {
+ return addParam("prefix", value)
+}
+
+// EncodingType is an option to set encoding-type parameter
+func EncodingType(value string) Option {
+ return addParam("encoding-type", value)
+}
+
+// MaxUploads is an option to set max-uploads parameter
+func MaxUploads(value int) Option {
+ return addParam("max-uploads", strconv.Itoa(value))
+}
+
+// KeyMarker is an option to set key-marker parameter
+func KeyMarker(value string) Option {
+ return addParam("key-marker", value)
+}
+
+// UploadIDMarker is an option to set upload-id-marker parameter
+func UploadIDMarker(value string) Option {
+ return addParam("upload-id-marker", value)
+}
+
+// DeleteObjectsQuiet DeleteObjects详细(verbose)模式或简单(quiet)模式,默认详细模式。
+func DeleteObjectsQuiet(isQuiet bool) Option {
+ return addArg(deleteObjectsQuiet, isQuiet)
+}
+
+// StorageClass bucket的存储方式
+func StorageClass(value StorageClassType) Option {
+ return addArg(storageClass, value)
+}
+
+// 断点续传配置,包括是否启用、cp文件
+type cpConfig struct {
+ IsEnable bool
+ FilePath string
+}
+
+// Checkpoint DownloadFile/UploadFile是否开启checkpoint及checkpoint文件路径
+func Checkpoint(isEnable bool, filePath string) Option {
+ return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
+}
+
+// Routines DownloadFile/UploadFile并发数
+func Routines(n int) Option {
+ return addArg(routineNum, n)
+}
+
+// InitCRC AppendObject CRC的校验的初始值
+func InitCRC(initCRC uint64) Option {
+ return addArg(initCRC64, initCRC)
+}
+
+// Progress set progress listener
+func Progress(listener ProgressListener) Option {
+ return addArg(progressListener, listener)
+}
+
+// ResponseContentType is an option to set response-content-type param
+func ResponseContentType(value string) Option {
+ return addParam("response-content-type", value)
+}
+
+// ResponseContentLanguage is an option to set response-content-language param
+func ResponseContentLanguage(value string) Option {
+ return addParam("response-content-language", value)
+}
+
+// ResponseExpires is an option to set response-expires param
+func ResponseExpires(value string) Option {
+ return addParam("response-expires", value)
+}
+
+// ResponseCacheControl is an option to set response-cache-control param
+func ResponseCacheControl(value string) Option {
+ return addParam("response-cache-control", value)
+}
+
+// ResponseContentDisposition is an option to set response-content-disposition param
+func ResponseContentDisposition(value string) Option {
+ return addParam("response-content-disposition", value)
+}
+
+// ResponseContentEncoding is an option to set response-content-encoding param
+func ResponseContentEncoding(value string) Option {
+ return addParam("response-content-encoding", value)
+}
+
+// Process is an option to set X-Oss-Process param
+func Process(value string) Option {
+ return addParam("X-Oss-Process", value)
+}
+func setHeader(key string, value interface{}) Option {
+ return func(params map[string]optionValue) error {
+ if value == nil {
+ return nil
+ }
+ params[key] = optionValue{value, optionHTTP}
+ return nil
+ }
+}
+
+func addParam(key string, value interface{}) Option {
+ return func(params map[string]optionValue) error {
+ if value == nil {
+ return nil
+ }
+ params[key] = optionValue{value, optionParam}
+ return nil
+ }
+}
+
+func addArg(key string, value interface{}) Option {
+ return func(params map[string]optionValue) error {
+ if value == nil {
+ return nil
+ }
+ params[key] = optionValue{value, optionArg}
+ return nil
+ }
+}
+
+func handleOptions(headers map[string]string, options []Option) error {
+ params := map[string]optionValue{}
+ for _, option := range options {
+ if option != nil {
+ if err := option(params); err != nil {
+ return err
+ }
+ }
+ }
+
+ for k, v := range params {
+ if v.Type == optionHTTP {
+ headers[k] = v.Value.(string)
+ }
+ }
+ return nil
+}
+
+func getRawParams(options []Option) (map[string]interface{}, error) {
+ // option
+ params := map[string]optionValue{}
+ for _, option := range options {
+ if option != nil {
+ if err := option(params); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ paramsm := map[string]interface{}{}
+ // serialize
+ for k, v := range params {
+ if v.Type == optionParam {
+ vs := params[k]
+ paramsm[k] = vs.Value.(string)
+ }
+ }
+
+ return paramsm, nil
+}
+
+func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
+ params := map[string]optionValue{}
+ for _, option := range options {
+ if option != nil {
+ if err := option(params); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if val, ok := params[param]; ok {
+ return val.Value, nil
+ }
+ return defaultVal, nil
+}
+
+func isOptionSet(options []Option, option string) (bool, interface{}, error) {
+ params := map[string]optionValue{}
+ for _, option := range options {
+ if option != nil {
+ if err := option(params); err != nil {
+ return false, nil, err
+ }
+ }
+ }
+
+ if val, ok := params[option]; ok {
+ return true, val.Value, nil
+ }
+ return false, nil, nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
new file mode 100644
index 000000000000..0ea897f039db
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
@@ -0,0 +1,105 @@
+package oss
+
+import "io"
+
+// ProgressEventType transfer progress event type
+type ProgressEventType int
+
+const (
+ // TransferStartedEvent transfer started, set TotalBytes
+ TransferStartedEvent ProgressEventType = 1 + iota
+	// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
+ TransferDataEvent
+ // TransferCompletedEvent transfer completed
+ TransferCompletedEvent
+ // TransferFailedEvent transfer encounters an error
+ TransferFailedEvent
+)
+
+// ProgressEvent progress event
+type ProgressEvent struct {
+ ConsumedBytes int64
+ TotalBytes int64
+ EventType ProgressEventType
+}
+
+// ProgressListener listen progress change
+type ProgressListener interface {
+ ProgressChanged(event *ProgressEvent)
+}
+
+// -------------------- private --------------------
+
+func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
+ return &ProgressEvent{
+ ConsumedBytes: consumed,
+ TotalBytes: total,
+ EventType: eventType}
+}
+
+// publishProgress
+func publishProgress(listener ProgressListener, event *ProgressEvent) {
+ if listener != nil && event != nil {
+ listener.ProgressChanged(event)
+ }
+}
+
+type readerTracker struct {
+ completedBytes int64
+}
+
+type teeReader struct {
+ reader io.Reader
+ writer io.Writer
+ listener ProgressListener
+ consumedBytes int64
+ totalBytes int64
+ tracker *readerTracker
+}
+
+// TeeReader returns a Reader that writes to w what it reads from r.
+// All reads from r performed through it are matched with
+// corresponding writes to w. There is no internal buffering -
+// the write must complete before the read completes.
+// Any error encountered while writing is reported as a read error.
+func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
+ return &teeReader{
+ reader: reader,
+ writer: writer,
+ listener: listener,
+ consumedBytes: 0,
+ totalBytes: totalBytes,
+ tracker: tracker,
+ }
+}
+
+func (t *teeReader) Read(p []byte) (n int, err error) {
+ n, err = t.reader.Read(p)
+
+ // read encountered error
+ if err != nil && err != io.EOF {
+ event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
+ publishProgress(t.listener, event)
+ }
+
+ if n > 0 {
+ t.consumedBytes += int64(n)
+ // crc
+ if t.writer != nil {
+ if n, err := t.writer.Write(p[:n]); err != nil {
+ return n, err
+ }
+ }
+ // progress
+ if t.listener != nil {
+ event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
+ publishProgress(t.listener, event)
+ }
+ // track
+ if t.tracker != nil {
+ t.tracker.completedBytes = t.consumedBytes
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
new file mode 100644
index 000000000000..8ee1053e83ab
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
@@ -0,0 +1,24 @@
+// +build !go1.7
+
+package oss
+
+import (
+ "net"
+ "net/http"
+)
+
+func newTransport(conn *Conn, config *Config) *http.Transport {
+ httpTimeOut := conn.config.HTTPTimeout
+ // new Transport
+ transport := &http.Transport{
+ Dial: func(netw, addr string) (net.Conn, error) {
+ conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+ if err != nil {
+ return nil, err
+ }
+ return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
+ },
+ ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
+ }
+ return transport
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
new file mode 100644
index 000000000000..3d655fd5fd67
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
@@ -0,0 +1,25 @@
+// +build go1.7
+
+package oss
+
+import (
+ "net"
+ "net/http"
+)
+
+func newTransport(conn *Conn, config *Config) *http.Transport {
+ httpTimeOut := conn.config.HTTPTimeout
+ // new Transport
+ transport := &http.Transport{
+ Dial: func(netw, addr string) (net.Conn, error) {
+ conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+ if err != nil {
+ return nil, err
+ }
+ return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
+ },
+ IdleConnTimeout: httpTimeOut.IdleConnTimeout,
+ ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
+ }
+ return transport
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
new file mode 100644
index 000000000000..8df36f7b2275
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
@@ -0,0 +1,450 @@
+package oss
+
+import (
+ "encoding/xml"
+ "net/url"
+ "time"
+)
+
+// ListBucketsResult ListBuckets请求返回的结果
+type ListBucketsResult struct {
+ XMLName xml.Name `xml:"ListAllMyBucketsResult"`
+ Prefix string `xml:"Prefix"` // 本次查询结果的前缀
+ Marker string `xml:"Marker"` // 标明查询的起点,未全部返回时有此节点
+ MaxKeys int `xml:"MaxKeys"` // 返回结果的最大数目,未全部返回时有此节点
+ IsTruncated bool `xml:"IsTruncated"` // 所有的结果是否已经全部返回
+ NextMarker string `xml:"NextMarker"` // 表示下一次查询的起点
+ Owner Owner `xml:"Owner"` // 拥有者信息
+ Buckets []BucketProperties `xml:"Buckets>Bucket"` // Bucket列表
+}
+
+// BucketProperties Bucket信息
+type BucketProperties struct {
+ XMLName xml.Name `xml:"Bucket"`
+ Name string `xml:"Name"` // Bucket名称
+ Location string `xml:"Location"` // Bucket所在的数据中心
+ CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
+ StorageClass string `xml:"StorageClass"` // Bucket的存储方式
+}
+
+// GetBucketACLResult GetBucketACL请求返回的结果
+type GetBucketACLResult struct {
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ ACL string `xml:"AccessControlList>Grant"` // Bucket权限
+ Owner Owner `xml:"Owner"` // Bucket拥有者信息
+}
+
+// LifecycleConfiguration Bucket的Lifecycle配置
+type LifecycleConfiguration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration"`
+ Rules []LifecycleRule `xml:"Rule"`
+}
+
+// LifecycleRule Lifecycle规则
+type LifecycleRule struct {
+ XMLName xml.Name `xml:"Rule"`
+ ID string `xml:"ID"` // 规则唯一的ID
+ Prefix string `xml:"Prefix"` // 规则所适用Object的前缀
+ Status string `xml:"Status"` // 规则是否生效
+ Expiration LifecycleExpiration `xml:"Expiration"` // 规则的过期属性
+}
+
+// LifecycleExpiration 规则的过期属性
+type LifecycleExpiration struct {
+ XMLName xml.Name `xml:"Expiration"`
+ Days int `xml:"Days,omitempty"` // 最后修改时间过后多少天生效
+ Date time.Time `xml:"Date,omitempty"` // 指定规则何时生效
+}
+
+type lifecycleXML struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration"`
+ Rules []lifecycleRule `xml:"Rule"`
+}
+
+type lifecycleRule struct {
+ XMLName xml.Name `xml:"Rule"`
+ ID string `xml:"ID"`
+ Prefix string `xml:"Prefix"`
+ Status string `xml:"Status"`
+ Expiration lifecycleExpiration `xml:"Expiration"`
+}
+
+type lifecycleExpiration struct {
+ XMLName xml.Name `xml:"Expiration"`
+ Days int `xml:"Days,omitempty"`
+ Date string `xml:"Date,omitempty"`
+}
+
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+
+func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
+ rs := []lifecycleRule{}
+ for _, rule := range rules {
+ r := lifecycleRule{}
+ r.ID = rule.ID
+ r.Prefix = rule.Prefix
+ r.Status = rule.Status
+ if rule.Expiration.Date.IsZero() {
+ r.Expiration.Days = rule.Expiration.Days
+ } else {
+ r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
+ }
+ rs = append(rs, r)
+ }
+ return rs
+}
+
+// BuildLifecycleRuleByDays 指定过期天数构建Lifecycle规则
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+ var statusStr = "Enabled"
+ if !status {
+ statusStr = "Disabled"
+ }
+ return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+ Expiration: LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate 指定过期时间构建Lifecycle规则
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
+ var statusStr = "Enabled"
+ if !status {
+ statusStr = "Disabled"
+ }
+ date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+ return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+ Expiration: LifecycleExpiration{Date: date}}
+}
+
+// GetBucketLifecycleResult GetBucketLifecycle请求请求结果
+type GetBucketLifecycleResult LifecycleConfiguration
+
+// RefererXML Referer配置
+type RefererXML struct {
+ XMLName xml.Name `xml:"RefererConfiguration"`
+ AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // 是否允许referer字段为空的请求访问
+ RefererList []string `xml:"RefererList>Referer"` // referer访问白名单
+}
+
+// GetBucketRefererResult GetBucketReferer请求返回结果
+type GetBucketRefererResult RefererXML
+
+// LoggingXML Logging配置
+type LoggingXML struct {
+ XMLName xml.Name `xml:"BucketLoggingStatus"`
+ LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // 访问日志信息容器
+}
+
+type loggingXMLEmpty struct {
+ XMLName xml.Name `xml:"BucketLoggingStatus"`
+}
+
+// LoggingEnabled 访问日志信息容器
+type LoggingEnabled struct {
+ XMLName xml.Name `xml:"LoggingEnabled"`
+ TargetBucket string `xml:"TargetBucket"` //存放访问日志的Bucket
+ TargetPrefix string `xml:"TargetPrefix"` //保存访问日志的文件前缀
+}
+
+// GetBucketLoggingResult GetBucketLogging请求返回结果
+type GetBucketLoggingResult LoggingXML
+
+// WebsiteXML Website配置
+type WebsiteXML struct {
+ XMLName xml.Name `xml:"WebsiteConfiguration"`
+ IndexDocument IndexDocument `xml:"IndexDocument"` // 目录URL时添加的索引文件
+ ErrorDocument ErrorDocument `xml:"ErrorDocument"` // 404错误时使用的文件
+}
+
+// IndexDocument 目录URL时添加的索引文件
+type IndexDocument struct {
+ XMLName xml.Name `xml:"IndexDocument"`
+ Suffix string `xml:"Suffix"` // 目录URL时添加的索引文件名
+}
+
+// ErrorDocument 404错误时使用的文件
+type ErrorDocument struct {
+ XMLName xml.Name `xml:"ErrorDocument"`
+ Key string `xml:"Key"` // 404错误时使用的文件名
+}
+
+// GetBucketWebsiteResult GetBucketWebsite请求返回结果
+type GetBucketWebsiteResult WebsiteXML
+
+// CORSXML CORS配置
+type CORSXML struct {
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CORSRules []CORSRule `xml:"CORSRule"` // CORS规则列表
+}
+
+// CORSRule CORS规则
+type CORSRule struct {
+ XMLName xml.Name `xml:"CORSRule"`
+ AllowedOrigin []string `xml:"AllowedOrigin"` // 允许的来源,默认通配符"*"
+ AllowedMethod []string `xml:"AllowedMethod"` // 允许的方法
+ AllowedHeader []string `xml:"AllowedHeader"` // 允许的请求头
+ ExposeHeader []string `xml:"ExposeHeader"` // 允许的响应头
+ MaxAgeSeconds int `xml:"MaxAgeSeconds"` // 最大的缓存时间
+}
+
+// GetBucketCORSResult GetBucketCORS请求返回的结果
+type GetBucketCORSResult CORSXML
+
+// GetBucketInfoResult GetBucketInfo请求返回结果
+type GetBucketInfoResult struct {
+ XMLName xml.Name `xml:"BucketInfo"`
+ BucketInfo BucketInfo `xml:"Bucket"`
+}
+
+// BucketInfo Bucket信息
+type BucketInfo struct {
+ XMLName xml.Name `xml:"Bucket"`
+ Name string `xml:"Name"` // Bucket名称
+ Location string `xml:"Location"` // Bucket所在的数据中心
+ CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
+ ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket访问的外网域名
+ IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket访问的内网域名
+ ACL string `xml:"AccessControlList>Grant"` // Bucket权限
+ Owner Owner `xml:"Owner"` // Bucket拥有者信息
+ StorageClass string `xml:"StorageClass"` // Bucket存储类型
+}
+
+// ListObjectsResult ListObjects请求返回结果
+type ListObjectsResult struct {
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Prefix string `xml:"Prefix"` // 本次查询结果的开始前缀
+ Marker string `xml:"Marker"` // 这次查询的起点
+ MaxKeys int `xml:"MaxKeys"` // 请求返回结果的最大数目
+ Delimiter string `xml:"Delimiter"` // 对Object名字进行分组的字符
+ IsTruncated bool `xml:"IsTruncated"` // 是否所有的结果都已经返回
+ NextMarker string `xml:"NextMarker"` // 下一次查询的起点
+ Objects []ObjectProperties `xml:"Contents"` // Object类别
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 以delimiter结尾并有共同前缀的Object的集合
+}
+
+// ObjectProperties Object属性
+type ObjectProperties struct {
+ XMLName xml.Name `xml:"Contents"`
+ Key string `xml:"Key"` // Object的Key
+ Type string `xml:"Type"` // Object Type
+ Size int64 `xml:"Size"` // Object的长度字节数
+ ETag string `xml:"ETag"` // 标示Object的内容
+ Owner Owner `xml:"Owner"` // 保存Object拥有者信息的容器
+ LastModified time.Time `xml:"LastModified"` // Object最后修改时间
+ StorageClass string `xml:"StorageClass"` // Object的存储类型
+}
+
+// Owner Bucket/Object的owner
+type Owner struct {
+ XMLName xml.Name `xml:"Owner"`
+ ID string `xml:"ID"` // 用户ID
+ DisplayName string `xml:"DisplayName"` // Owner名字
+}
+
+// CopyObjectResult CopyObject请求返回的结果
+type CopyObjectResult struct {
+ XMLName xml.Name `xml:"CopyObjectResult"`
+ LastModified time.Time `xml:"LastModified"` // 新Object最后更新时间
+ ETag string `xml:"ETag"` // 新Object的ETag值
+}
+
+// GetObjectACLResult GetObjectACL请求返回的结果
+type GetObjectACLResult GetBucketACLResult
+
+type deleteXML struct {
+ XMLName xml.Name `xml:"Delete"`
+ Objects []DeleteObject `xml:"Object"` // 删除的所有Object
+ Quiet bool `xml:"Quiet"` // 安静响应模式
+}
+
+// DeleteObject 删除的Object
+type DeleteObject struct {
+ XMLName xml.Name `xml:"Object"`
+ Key string `xml:"Key"` // Object名称
+}
+
+// DeleteObjectsResult DeleteObjects请求返回结果
+type DeleteObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []string `xml:"Deleted>Key"` // 删除的Object列表
+}
+
+// InitiateMultipartUploadResult InitiateMultipartUpload请求返回结果
+type InitiateMultipartUploadResult struct {
+ XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
+ Bucket string `xml:"Bucket"` // Bucket名称
+ Key string `xml:"Key"` // 上传Object名称
+ UploadID string `xml:"UploadId"` // 生成的UploadId
+}
+
+// UploadPart 上传/拷贝的分片
+type UploadPart struct {
+ XMLName xml.Name `xml:"Part"`
+ PartNumber int `xml:"PartNumber"` // Part编号
+ ETag string `xml:"ETag"` // ETag缓存码
+}
+
+type uploadParts []UploadPart
+
+func (slice uploadParts) Len() int {
+ return len(slice)
+}
+
+func (slice uploadParts) Less(i, j int) bool {
+ return slice[i].PartNumber < slice[j].PartNumber
+}
+
+func (slice uploadParts) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// UploadPartCopyResult 拷贝分片请求返回的结果
+type UploadPartCopyResult struct {
+ XMLName xml.Name `xml:"CopyPartResult"`
+ LastModified time.Time `xml:"LastModified"` // 最后修改时间
+ ETag string `xml:"ETag"` // ETag
+}
+
+type completeMultipartUploadXML struct {
+ XMLName xml.Name `xml:"CompleteMultipartUpload"`
+ Part []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult 提交分片上传任务返回结果
+type CompleteMultipartUploadResult struct {
+ XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
+ Location string `xml:"Location"` // Object的URL
+ Bucket string `xml:"Bucket"` // Bucket名称
+ ETag string `xml:"ETag"` // Object的ETag
+ Key string `xml:"Key"` // Object的名字
+}
+
+// ListUploadedPartsResult ListUploadedParts请求返回结果
+type ListUploadedPartsResult struct {
+ XMLName xml.Name `xml:"ListPartsResult"`
+ Bucket string `xml:"Bucket"` // Bucket名称
+ Key string `xml:"Key"` // Object名称
+ UploadID string `xml:"UploadId"` // 上传Id
+ NextPartNumberMarker string `xml:"NextPartNumberMarker"` // 下一个Part的位置
+ MaxParts int `xml:"MaxParts"` // 最大Part个数
+ IsTruncated bool `xml:"IsTruncated"` // 是否完全上传完成
+ UploadedParts []UploadedPart `xml:"Part"` // 已完成的Part
+}
+
+// UploadedPart 该任务已经上传的分片
+type UploadedPart struct {
+ XMLName xml.Name `xml:"Part"`
+ PartNumber int `xml:"PartNumber"` // Part编号
+ LastModified time.Time `xml:"LastModified"` // 最后一次修改时间
+ ETag string `xml:"ETag"` // ETag缓存码
+ Size int `xml:"Size"` // Part大小
+}
+
+// ListMultipartUploadResult ListMultipartUpload请求返回结果
+type ListMultipartUploadResult struct {
+ XMLName xml.Name `xml:"ListMultipartUploadsResult"`
+ Bucket string `xml:"Bucket"` // Bucket名称
+ Delimiter string `xml:"Delimiter"` // 分组分割符
+ Prefix string `xml:"Prefix"` // 筛选前缀
+ KeyMarker string `xml:"KeyMarker"` // 起始Object位置
+ UploadIDMarker string `xml:"UploadIdMarker"` // 起始UploadId位置
+ NextKeyMarker string `xml:"NextKeyMarker"` // 如果没有全部返回,标明接下去的KeyMarker位置
+ NextUploadIDMarker string `xml:"NextUploadIdMarker"` // 如果没有全部返回,标明接下去的UploadId位置
+ MaxUploads int `xml:"MaxUploads"` // 返回最大Upload数目
+ IsTruncated bool `xml:"IsTruncated"` // 是否完全返回
+ Uploads []UncompletedUpload `xml:"Upload"` // 未完成上传的MultipartUpload
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 所有名字包含指定的前缀且第一次出现delimiter字符之间的object作为一组的分组结果
+}
+
+// UncompletedUpload 未完成的Upload任务
+type UncompletedUpload struct {
+ XMLName xml.Name `xml:"Upload"`
+ Key string `xml:"Key"` // Object名称
+ UploadID string `xml:"UploadId"` // 对应UploadId
+ Initiated time.Time `xml:"Initiated"` // 初始化时间,格式2012-02-23T04:18:23.000Z
+}
+
+// 解析URL编码
+func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
+ var err error
+ for i := 0; i < len(result.DeletedObjects); i++ {
+ result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// 解析URL编码
+func decodeListObjectsResult(result *ListObjectsResult) error {
+ var err error
+ result.Prefix, err = url.QueryUnescape(result.Prefix)
+ if err != nil {
+ return err
+ }
+ result.Marker, err = url.QueryUnescape(result.Marker)
+ if err != nil {
+ return err
+ }
+ result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+ if err != nil {
+ return err
+ }
+ result.NextMarker, err = url.QueryUnescape(result.NextMarker)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < len(result.Objects); i++ {
+ result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+ if err != nil {
+ return err
+ }
+ }
+ for i := 0; i < len(result.CommonPrefixes); i++ {
+ result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// 解析URL编码
+func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
+ var err error
+ result.Prefix, err = url.QueryUnescape(result.Prefix)
+ if err != nil {
+ return err
+ }
+ result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+ if err != nil {
+ return err
+ }
+ result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
+ if err != nil {
+ return err
+ }
+ result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < len(result.Uploads); i++ {
+ result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key)
+ if err != nil {
+ return err
+ }
+ }
+ for i := 0; i < len(result.CommonPrefixes); i++ {
+ result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// createBucketConfiguration CreateBucket请求的存储类型配置
+type createBucketConfiguration struct {
+ XMLName xml.Name `xml:"CreateBucketConfiguration"`
+ StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
new file mode 100644
index 000000000000..049ed82d9356
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
@@ -0,0 +1,485 @@
+package oss
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "os"
+ "time"
+)
+
+//
+// UploadFile uploads a local file in multiple parts.
+//
+// objectKey   name of the destination object.
+// filePath    path of the local file to upload.
+// partSize    size of each part in bytes, e.g. 100 * 1024 for 100KB parts.
+// options     object properties applied on upload; see InitiateMultipartUpload.
+//
+// error       nil on success, otherwise the failure reason.
+//
+func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
+	// NOTE(review): the message assumes MinPartSize is 1024KB and
+	// MaxPartSize is 5GB — confirm against the package constants.
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	cpConf, err := getCpConfig(options, filePath)
+	if err != nil {
+		return err
+	}
+
+	routines := getRoutines(options)
+
+	// With checkpointing enabled the upload is resumable across runs.
+	if cpConf.IsEnable {
+		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
+	}
+
+	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
+}
+
+// ----- concurrent upload without checkpoint -----
+
+// getCpConfig extracts the checkpoint configuration from options. When
+// checkpointing is enabled but no file was given, the checkpoint file
+// defaults to filePath plus CheckpointFileSuffix.
+func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
+	cpc := &cpConfig{}
+	cpcOpt, err := findOption(options, checkpointConfig, nil)
+	if err != nil || cpcOpt == nil {
+		return cpc, err
+	}
+
+	cpc = cpcOpt.(*cpConfig)
+	if cpc.IsEnable && cpc.FilePath == "" {
+		cpc.FilePath = filePath + CheckpointFileSuffix
+	}
+
+	return cpc, nil
+}
+
+// getRoutines returns the requested concurrency level from options,
+// defaulting to 1 and clamping the value into the range [1, 100].
+func getRoutines(options []Option) int {
+	rtnOpt, err := findOption(options, routineNum, nil)
+	if err != nil || rtnOpt == nil {
+		return 1
+	}
+
+	switch rs := rtnOpt.(int); {
+	case rs < 1:
+		return 1
+	case rs > 100:
+		return 100
+	default:
+		return rs
+	}
+}
+
+// getProgressListener returns the progress callback configured in
+// options, or nil when none was set.
+func getProgressListener(options []Option) ProgressListener {
+	isSet, listener, _ := isOptionSet(options, progressListener)
+	if !isSet {
+		return nil
+	}
+	return listener.(ProgressListener)
+}
+
+// uploadPartHook is invoked before each part upload; tests swap it in
+// to inject failures.
+type uploadPartHook func(id int, chunk FileChunk) error
+
+// uploadPartHooker is the active hook; production code keeps the no-op.
+var uploadPartHooker uploadPartHook = defaultUploadPart
+
+// defaultUploadPart is the no-op default hook.
+func defaultUploadPart(id int, chunk FileChunk) error {
+	return nil
+}
+
+// workerArg bundles the state shared by all upload worker goroutines.
+type workerArg struct {
+	bucket   *Bucket                       // destination bucket
+	filePath string                        // local file being uploaded
+	imur     InitiateMultipartUploadResult // the multipart upload session
+	hook     uploadPartHook                // test hook run before each part
+}
+
+// worker is the upload goroutine: it takes chunks from jobs, uploads
+// each as one part, and reports the part on results or the error on
+// failed. A closed die channel makes the worker exit without
+// publishing further results.
+func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+	for chunk := range jobs {
+		if err := arg.hook(id, chunk); err != nil {
+			failed <- err
+			break
+		}
+		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
+		if err != nil {
+			failed <- err
+			break
+		}
+		// Drop the result if the upload has been cancelled.
+		select {
+		case <-die:
+			return
+		default:
+		}
+		results <- part
+	}
+}
+
+// scheduler feeds every chunk into the jobs channel and then closes it
+// so the workers' range loops terminate.
+func scheduler(jobs chan FileChunk, chunks []FileChunk) {
+	for i := range chunks {
+		jobs <- chunks[i]
+	}
+	close(jobs)
+}
+
+// getTotalBytes sums the sizes of all chunks.
+func getTotalBytes(chunks []FileChunk) int64 {
+	var total int64
+	for i := range chunks {
+		total += chunks[i].Size
+	}
+	return total
+}
+
+// uploadFile performs a concurrent multipart upload without checkpoint
+// support: it initiates the upload, fans the parts out to worker
+// goroutines, and completes (or aborts on failure) the upload.
+func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+	listener := getProgressListener(options)
+
+	chunks, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	// Initiate the multipart upload session.
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getTotalBytes(chunks)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines.
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the parts to the workers.
+	go scheduler(jobs, chunks)
+
+	// Wait until every part has been uploaded; abort on the first error.
+	completed := 0
+	parts := make([]UploadPart, len(chunks))
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			parts[part.PartNumber-1] = part
+			completedBytes += chunks[part.PartNumber-1].Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+			bucket.AbortMultipartUpload(imur)
+			return err
+		}
+	}
+
+	// Fixed: this previously re-published TransferStartedEvent; all
+	// parts are done here, matching uploadFileWithCp's completion event.
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	publishProgress(listener, event)
+
+	// Complete the upload; abort the session if completion fails.
+	_, err = bucket.CompleteMultipartUpload(imur, parts)
+	if err != nil {
+		bucket.AbortMultipartUpload(imur)
+		return err
+	}
+	return nil
+}
+
+// ----- concurrent upload with checkpoint (resumable) -----
+// uploadCpMagic identifies a valid upload checkpoint file.
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+// uploadCheckpoint is the JSON-serialized state of a resumable upload.
+type uploadCheckpoint struct {
+	Magic     string   // magic marker (uploadCpMagic)
+	MD5       string   // MD5 of the checkpoint content itself
+	FilePath  string   // local file being uploaded
+	FileStat  cpStat   // snapshot of the local file's state
+	ObjectKey string   // destination object key
+	UploadID  string   // multipart upload ID
+	Parts     []cpPart // every part of the local file
+}
+
+// cpStat is the snapshot of the local file used to detect changes.
+type cpStat struct {
+	Size         int64     // file size in bytes
+	LastModified time.Time // last modification time of the local file
+	MD5          string    // MD5 of the local file (see calcFileMD5)
+}
+
+// cpPart tracks a single part's upload state within the checkpoint.
+type cpPart struct {
+	Chunk       FileChunk  // the chunk of the local file
+	Part        UploadPart // the uploaded part (valid when IsCompleted)
+	IsCompleted bool       // whether this part finished uploading
+}
+
+// isValid reports whether the loaded checkpoint can be resumed: its
+// magic and self-MD5 must match, and the local file must be unchanged
+// since the checkpoint was written.
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+	// Verify the checkpoint magic and the MD5 of its own content
+	// (computed with the MD5 field zeroed, mirroring dump).
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// Check whether the local file has been modified.
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return false, err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return false, err
+	}
+
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare size, mtime and MD5. NOTE(review): calcFileMD5 is a stub
+	// returning "", so the MD5 comparison always matches — only size
+	// and mtime are effective here.
+	if cp.FileStat.Size != st.Size() ||
+		cp.FileStat.LastModified != st.ModTime() ||
+		cp.FileStat.MD5 != md {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// load reads the checkpoint file at filePath and unmarshals its JSON
+// content into cp.
+func (cp *uploadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(contents, cp)
+}
+
+// dump serializes the checkpoint to filePath as JSON, embedding an MD5
+// of its own content so load/isValid can detect corruption.
+func (cp *uploadCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Compute the MD5 over the content with the MD5 field zeroed.
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize again, now including the MD5.
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Write the checkpoint file.
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// updatePart records a finished part in the checkpoint and marks it
+// completed. Part numbers are 1-based.
+func (cp *uploadCheckpoint) updatePart(part UploadPart) {
+	entry := &cp.Parts[part.PartNumber-1]
+	entry.Part = part
+	entry.IsCompleted = true
+}
+
+// todoParts returns the chunks whose upload has not completed yet.
+func (cp *uploadCheckpoint) todoParts() []FileChunk {
+	pending := []FileChunk{}
+	for i := range cp.Parts {
+		if !cp.Parts[i].IsCompleted {
+			pending = append(pending, cp.Parts[i].Chunk)
+		}
+	}
+	return pending
+}
+
+// allParts returns every uploaded part recorded in the checkpoint, in
+// part order.
+func (cp *uploadCheckpoint) allParts() []UploadPart {
+	ps := []UploadPart{}
+	for i := range cp.Parts {
+		ps = append(ps, cp.Parts[i].Part)
+	}
+	return ps
+}
+
+// getCompletedBytes returns the number of bytes already uploaded
+// according to the checkpoint.
+func (cp *uploadCheckpoint) getCompletedBytes() int64 {
+	var done int64
+	for i := range cp.Parts {
+		if cp.Parts[i].IsCompleted {
+			done += cp.Parts[i].Chunk.Size
+		}
+	}
+	return done
+}
+
+// calcFileMD5 computes the MD5 of the local file. NOTE(review): this
+// is a stub that always returns "" — consequently the MD5 fields in
+// the checkpoint are always empty and the MD5 comparison in isValid is
+// a no-op. Confirm whether content validation is intentionally
+// disabled upstream before implementing it.
+func calcFileMD5(filePath string) (string, error) {
+	return "", nil
+}
+
+// prepare initializes a fresh checkpoint: it records the local file's
+// size/mtime/MD5, splits the file into chunks, and initiates the
+// multipart upload to obtain an upload ID.
+func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
+	// Checkpoint identity.
+	cp.Magic = uploadCpMagic
+	cp.FilePath = filePath
+	cp.ObjectKey = objectKey
+
+	// Snapshot of the local file.
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return err
+	}
+	cp.FileStat.Size = st.Size()
+	cp.FileStat.LastModified = st.ModTime()
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return err
+	}
+	cp.FileStat.MD5 = md
+
+	// Chunks to upload; all start out incomplete.
+	parts, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	cp.Parts = make([]cpPart, len(parts))
+	for i, part := range parts {
+		cp.Parts[i].Chunk = part
+		cp.Parts[i].IsCompleted = false
+	}
+
+	// Initiate the multipart upload session.
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+	cp.UploadID = imur.UploadID
+
+	return nil
+}
+
+// complete finishes the multipart upload described by the checkpoint
+// and, on success, removes the checkpoint file (best effort).
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+	imur := InitiateMultipartUploadResult{
+		Bucket:   bucket.BucketName,
+		Key:      cp.ObjectKey,
+		UploadID: cp.UploadID,
+	}
+	if _, err := bucket.CompleteMultipartUpload(imur, parts); err != nil {
+		return err
+	}
+	os.Remove(cpFilePath)
+	return nil
+}
+
+// uploadFileWithCp performs a concurrent multipart upload with
+// checkpoint support, resuming a previous upload when the checkpoint
+// file is valid and re-initializing otherwise.
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+	listener := getProgressListener(options)
+
+	// Load the checkpoint; an unreadable file is removed and ignored.
+	ucp := uploadCheckpoint{}
+	err := ucp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// On load failure or invalid data, start a fresh upload session.
+	valid, err := ucp.isValid(filePath)
+	if err != nil || !valid {
+		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	// Only the parts not yet completed are uploaded.
+	chunks := ucp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket: bucket.BucketName,
+		Key: objectKey,
+		UploadID: ucp.UploadID}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	completedBytes := ucp.getCompletedBytes()
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines.
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Feed the pending parts to the workers.
+	go scheduler(jobs, chunks)
+
+	// Wait for all pending parts. The checkpoint is persisted after
+	// every finished part so a crash can resume from this point.
+	completed := 0
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			ucp.updatePart(part)
+			ucp.dump(cpFilePath) // best effort; error deliberately ignored
+			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
+			publishProgress(listener, event)
+		case err := <-failed:
+			// Unlike uploadFile, the session is NOT aborted here: the
+			// checkpoint allows resuming the same upload ID later.
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
+	publishProgress(listener, event)
+
+	// Complete the multipart upload and delete the checkpoint file.
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
+	return err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
new file mode 100644
index 000000000000..646f6da581f6
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -0,0 +1,265 @@
+package oss
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc64"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Get User Agent
+// userAgent identifies this SDK: version, OS name/release, machine
+// type and the Go runtime version.
+var userAgent = func() string {
+	sys := getSysInfo()
+	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+		sys.release, sys.machine, runtime.Version())
+}()
+
+// sysInfo describes the host system for the User-Agent string.
+type sysInfo struct {
+	name    string // OS name, e.g. windows/Linux
+	release string // OS release, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
+	machine string // machine type, e.g. amd64/x86_64
+}
+
+// Get system info
+// getSysInfo returns OS name, kernel release and machine type. It
+// prefers the output of uname(1) and falls back to runtime.GOOS /
+// runtime.GOARCH where uname is unavailable (e.g. Windows).
+func getSysInfo() sysInfo {
+	name := runtime.GOOS
+	release := "-"
+	machine := runtime.GOARCH
+	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+		name = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
+		release = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
+		machine = string(bytes.TrimSpace(out))
+	}
+	return sysInfo{name: name, release: release, machine: machine}
+}
+
+// unpackedRange is a parsed HTTP byte-range specification.
+type unpackedRange struct {
+	hasStart bool  // whether a start offset was specified
+	hasEnd   bool  // whether an end offset was specified
+	start    int64 // start offset
+	end      int64 // end offset (inclusive, as parsed)
+}
+
+// invalidRangeError reports that r is not a valid range specification.
+func invalidRangeError(r string) error {
+	return errors.New("InvalidRange " + r)
+}
+
+// parseRange parse various styles of range such as bytes=M-N
+func parseRange(normalizedRange string) (*unpackedRange, error) {
+ var err error
+ hasStart := false
+ hasEnd := false
+ var start int64
+ var end int64
+
+ // bytes==M-N or ranges=M-N
+ nrSlice := strings.Split(normalizedRange, "=")
+ if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
+ return nil, invalidRangeError(normalizedRange)
+ }
+
+ // bytes=M-N,X-Y
+ rSlice := strings.Split(nrSlice[1], ",")
+ rStr := rSlice[0]
+
+ if strings.HasSuffix(rStr, "-") { // M-
+ startStr := rStr[:len(rStr)-1]
+ start, err = strconv.ParseInt(startStr, 10, 64)
+ if err != nil {
+ return nil, invalidRangeError(normalizedRange)
+ }
+ hasStart = true
+ } else if strings.HasPrefix(rStr, "-") { // -N
+ len := rStr[1:]
+ end, err = strconv.ParseInt(len, 10, 64)
+ if err != nil {
+ return nil, invalidRangeError(normalizedRange)
+ }
+ if end == 0 { // -0
+ return nil, invalidRangeError(normalizedRange)
+ }
+ hasEnd = true
+ } else { // M-N
+ valSlice := strings.Split(rStr, "-")
+ if len(valSlice) != 2 {
+ return nil, invalidRangeError(normalizedRange)
+ }
+ start, err = strconv.ParseInt(valSlice[0], 10, 64)
+ if err != nil {
+ return nil, invalidRangeError(normalizedRange)
+ }
+ hasStart = true
+ end, err = strconv.ParseInt(valSlice[1], 10, 64)
+ if err != nil {
+ return nil, invalidRangeError(normalizedRange)
+ }
+ hasEnd = true
+ }
+
+ return &unpackedRange{hasStart, hasEnd, start, end}, nil
+}
+
+// adjustRange clamps a parsed range to the file size and returns the
+// half-open window [start, end) to read. A nil or out-of-bounds range
+// yields the whole file.
+func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
+	if ur == nil {
+		return 0, size
+	}
+
+	if ur.hasStart && ur.hasEnd {
+		// "M-N": the parsed end is inclusive, so add 1 for the bound.
+		start = ur.start
+		end = ur.end + 1
+		if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
+			start = 0
+			end = size
+		}
+	} else if ur.hasStart {
+		// "M-": from M to the end of the file.
+		start = ur.start
+		end = size
+		if ur.start < 0 || ur.start >= size {
+			start = 0
+		}
+	} else if ur.hasEnd {
+		// "-N": the last N bytes.
+		start = size - ur.end
+		end = size
+		if ur.end < 0 || ur.end > size {
+			start = 0
+			end = size
+		}
+	}
+	return
+}
+
+// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
+func GetNowSec() int64 {
+	return time.Now().Unix()
+}
+
+// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC. The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64. Note that this
+// means the result of calling UnixNano on the zero Time is undefined.
+func GetNowNanoSec() int64 {
+	return time.Now().UnixNano()
+}
+
+// GetNowGMT returns the current time formatted as in HTTP headers,
+// e.g. "Mon, 02 Jan 2006 15:04:05 GMT".
+func GetNowGMT() string {
+	return time.Now().UTC().Format(http.TimeFormat)
+}
+
+// FileChunk is one piece of a file, uploaded as a single part.
+type FileChunk struct {
+	Number int   // part number (1-based)
+	Offset int64 // offset of the chunk within the file
+	Size   int64 // chunk size in bytes
+}
+
+// SplitFileByPartNum splits a big file into chunkNum parts of equal
+// size; the last part also carries the remainder. The returned chunks
+// are valid only when error is nil.
+func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
+	if chunkNum <= 0 || chunkNum > 10000 {
+		return nil, errors.New("chunkNum invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	// Each chunk must be at least one byte. NOTE(review): the error
+	// prefix differs from the check above ("oss: " vs none) — confirm
+	// which convention this package intends.
+	if int64(chunkNum) > stat.Size() {
+		return nil, errors.New("oss: chunkNum invalid")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	var chunkN = (int64)(chunkNum)
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * (stat.Size() / chunkN)
+		if i == chunkN-1 {
+			// Last chunk absorbs the remainder.
+			chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
+		} else {
+			chunk.Size = stat.Size() / chunkN
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into chunks of chunkSize bytes
+// each, plus a final smaller chunk for any remainder. The returned
+// chunks are valid only when error is nil.
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
+	if chunkSize <= 0 {
+		return nil, errors.New("chunkSize invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	// OSS caps a multipart upload at 10000 parts.
+	var chunkN = stat.Size() / chunkSize
+	if chunkN >= 10000 {
+		return nil, errors.New("Too many parts, please increase part size.")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * chunkSize
+		chunk.Size = chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	// Trailing remainder becomes the final, smaller chunk.
+	if stat.Size()%chunkSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.Offset = int64(len(chunks)) * chunkSize
+		chunk.Size = stat.Size() % chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// GetPartEnd returns the inclusive end offset of a part that starts at
+// begin, given the total size and the per-part size, clamped to the
+// last byte of the object.
+func GetPartEnd(begin int64, total int64, per int64) int64 {
+	end := begin + per - 1
+	if end >= total {
+		return total - 1
+	}
+	return end
+}
+
+// crcTable is a func value that builds the CRC64 table from the ECMA
+// polynomial. NOTE(review): this is a function, not the table itself —
+// callers must invoke crcTable(); confirm call sites do so.
+var crcTable = func() *crc64.Table {
+	return crc64.MakeTable(crc64.ECMA)
+}
diff --git a/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/denverdino/aliyungo/LICENSE.txt
new file mode 100644
index 000000000000..918297133253
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/LICENSE.txt
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015-2015 Li Yi (denverdino@gmail.com).
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go
new file mode 100755
index 000000000000..80841eb5f120
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/common/client.go
@@ -0,0 +1,498 @@
+package common
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/denverdino/aliyungo/util"
+)
+
+// FlattenArray is a []string encoded with an index suffix per item:
+// RemovalPolicy=["a", "b"] => RemovalPolicy.1="a" RemovalPolicy.2="b"
+type FlattenArray []string
+
+// UnderlineString is a string whose underscores are replaced with dots when
+// converted to a query parameter: SystemDisk_Category => SystemDisk.Category
+type UnderlineString string
+
+// A Client represents a client of ECS services
+type Client struct {
+ AccessKeyId string //Access Key Id
+ AccessKeySecret string //Access Key Secret (stored with a trailing "&"; see Init/SetAccessKeySecret)
+ securityToken string // STS security token for assume-role calls (may be empty)
+ debug bool // when true, requests/responses are logged
+ httpClient *http.Client
+ endpoint string // API endpoint URL
+ version string // API version string
+ serviceCode string // service code used for endpoint discovery
+ regionID Region
+ businessInfo string // extra info appended to the X-SDK-Client header
+ userAgent string // extra suffix appended to the User-Agent header
+}
+
+// Init initializes the client with the given API endpoint, API version and
+// credentials. The secret is stored with a trailing "&" because the Aliyun
+// RPC signature scheme uses "<secret>&" as the HMAC signing key (consumed by
+// util.CreateSignatureForRequest). The optional TLSHandshakeTimeout
+// environment variable (whole seconds) configures the TLS handshake timeout
+// of the underlying HTTP transport; unset/invalid values fall back to the
+// default http.Client.
+func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) {
+ client.AccessKeyId = accessKeyId
+ client.AccessKeySecret = accessKeySecret + "&"
+ client.debug = false
+ handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
+ if err != nil {
+ handshakeTimeout = 0
+ }
+ if handshakeTimeout == 0 {
+ client.httpClient = &http.Client{}
+ } else {
+ t := &http.Transport{
+ TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second}
+ client.httpClient = &http.Client{Transport: t}
+ }
+ client.endpoint = endpoint
+ client.version = version
+}
+
+// NewInit initializes the client like Init and additionally records the
+// service code and region, then attempts to resolve a region-specific
+// endpoint via the Location service (with a fallback to ./endpoints.xml).
+func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) {
+ client.Init(endpoint, version, accessKeyId, accessKeySecret)
+ client.serviceCode = serviceCode
+ client.regionID = regionID
+ client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret)
+}
+
+// InitClient finishes initialization of a client whose fields (credentials,
+// endpoint, version, serviceCode, regionID) were populated directly; it
+// builds the HTTP client (honoring TLSHandshakeTimeout, as in Init) and
+// resolves a region-specific endpoint. Returns the receiver for chaining.
+//
+// NOTE(review): client.AccessKeySecret already carries the trailing "&" when
+// set via Init/SetAccessKeySecret, and setEndpointByLocation's location
+// client appends another "&" — confirm upstream whether the double suffix is
+// intended here.
+func (client *Client) InitClient() *Client {
+ client.debug = false
+ handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
+ if err != nil {
+ handshakeTimeout = 0
+ }
+ if handshakeTimeout == 0 {
+ client.httpClient = &http.Client{}
+ } else {
+ t := &http.Transport{
+ TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second}
+ client.httpClient = &http.Client{Transport: t}
+ }
+ client.setEndpointByLocation(client.regionID, client.serviceCode, client.AccessKeyId, client.AccessKeySecret)
+ return client
+}
+
+// NewInitForAssumeRole initializes the client like NewInit and additionally
+// stores an STS security token for assume-role requests.
+func (client *Client) NewInitForAssumeRole(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region, securityToken string) {
+ client.NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode, regionID)
+ client.securityToken = securityToken
+}
+
+// setEndpointByLocation resolves the region/service-specific endpoint via the
+// Location service, falling back to the local endpoints.xml file. The
+// client's endpoint is replaced only when a non-empty result is found;
+// otherwise the endpoint previously passed to Init is kept.
+func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret string) {
+ locationClient := NewLocationClient(accessKeyId, accessKeySecret)
+ ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode)
+ if ep == "" {
+ ep = loadEndpointFromFile(region, serviceCode)
+ }
+
+ if ep != "" {
+ client.endpoint = ep
+ }
+}
+
+// ensureProperties validates that endpoint, version, AccessKeyId and
+// AccessKeySecret are all non-empty, returning an error naming the first
+// missing property, or nil when everything is set.
+//
+// NOTE(review): fmt.Sprintf is called without format arguments; plain string
+// literals would suffice (staticcheck S1039), but the calls are kept
+// unchanged to avoid churn (and an unused `fmt` import) in this vendored file.
+func (client *Client) ensureProperties() error {
+ var msg string
+
+ if client.endpoint == "" {
+ msg = fmt.Sprintf("endpoint cannot be empty!")
+ } else if client.version == "" {
+ msg = fmt.Sprintf("version cannot be empty!")
+ } else if client.AccessKeyId == "" {
+ msg = fmt.Sprintf("AccessKeyId cannot be empty!")
+ } else if client.AccessKeySecret == "" {
+ msg = fmt.Sprintf("AccessKeySecret cannot be empty!")
+ }
+
+ if msg != "" {
+ return errors.New(msg)
+ }
+
+ return nil
+}
+
+// ----------------------------------------------------
+// WithXXX methods: chainable variants of the SetXXX setters below;
+// each mutates the client and returns the receiver.
+// ----------------------------------------------------
+
+// WithEndpoint sets a custom endpoint and returns the client.
+func (client *Client) WithEndpoint(endpoint string) *Client {
+ client.SetEndpoint(endpoint)
+ return client
+}
+
+// WithVersion sets a custom API version and returns the client.
+func (client *Client) WithVersion(version string) *Client {
+ client.SetVersion(version)
+ return client
+}
+
+// WithRegionID sets the Region ID and returns the client.
+func (client *Client) WithRegionID(regionID Region) *Client {
+ client.SetRegionID(regionID)
+ return client
+}
+
+// WithServiceCode sets the service code and returns the client.
+func (client *Client) WithServiceCode(serviceCode string) *Client {
+ client.SetServiceCode(serviceCode)
+ return client
+}
+
+// WithAccessKeyId sets a new AccessKeyId and returns the client.
+func (client *Client) WithAccessKeyId(id string) *Client {
+ client.SetAccessKeyId(id)
+ return client
+}
+
+// WithAccessKeySecret sets a new AccessKeySecret and returns the client.
+func (client *Client) WithAccessKeySecret(secret string) *Client {
+ client.SetAccessKeySecret(secret)
+ return client
+}
+
+// WithSecurityToken sets the STS security token and returns the client.
+func (client *Client) WithSecurityToken(securityToken string) *Client {
+ client.SetSecurityToken(securityToken)
+ return client
+}
+
+// WithDebug toggles request/response logging and returns the client.
+func (client *Client) WithDebug(debug bool) *Client {
+ client.SetDebug(debug)
+ return client
+}
+
+// WithBusinessInfo sets the business info sent with each request and
+// returns the client.
+func (client *Client) WithBusinessInfo(businessInfo string) *Client {
+ client.SetBusinessInfo(businessInfo)
+ return client
+}
+
+// WithUserAgent sets the extra User-Agent suffix and returns the client.
+func (client *Client) WithUserAgent(userAgent string) *Client {
+ client.SetUserAgent(userAgent)
+ return client
+}
+
+// ----------------------------------------------------
+// SetXXX methods
+// ----------------------------------------------------
+
+// SetEndpoint sets a custom endpoint.
+func (client *Client) SetEndpoint(endpoint string) {
+ client.endpoint = endpoint
+}
+
+// SetVersion sets a custom API version.
+func (client *Client) SetVersion(version string) {
+ client.version = version
+}
+
+// SetRegionID sets the Region ID.
+func (client *Client) SetRegionID(regionID Region) {
+ client.regionID = regionID
+}
+
+// SetServiceCode sets the service code used for endpoint discovery.
+func (client *Client) SetServiceCode(serviceCode string) {
+ client.serviceCode = serviceCode
+}
+
+// SetAccessKeyId sets a new AccessKeyId.
+func (client *Client) SetAccessKeyId(id string) {
+ client.AccessKeyId = id
+}
+
+// SetAccessKeySecret sets a new AccessKeySecret. The secret is stored with a
+// trailing "&" because the Aliyun RPC signature scheme uses "<secret>&" as
+// the HMAC signing key.
+func (client *Client) SetAccessKeySecret(secret string) {
+ client.AccessKeySecret = secret + "&"
+}
+
+// SetDebug sets debug mode to log the request/response message.
+func (client *Client) SetDebug(debug bool) {
+ client.debug = debug
+}
+
+// SetBusinessInfo sets business info to log with the request/response
+// message. A leading "/" is added when missing; an empty string leaves the
+// field unchanged.
+func (client *Client) SetBusinessInfo(businessInfo string) {
+ if strings.HasPrefix(businessInfo, "/") {
+ client.businessInfo = businessInfo
+ } else if businessInfo != "" {
+ client.businessInfo = "/" + businessInfo
+ }
+}
+
+// SetUserAgent sets the extra User-Agent suffix sent with each request.
+func (client *Client) SetUserAgent(userAgent string) {
+ client.userAgent = userAgent
+}
+
+// SetSecurityToken sets the STS security token sent with each request.
+func (client *Client) SetSecurityToken(securityToken string) {
+ client.securityToken = securityToken
+}
+
+// Invoke sends a signed GET request for the given API action: it builds the
+// common request parameters, merges the action-specific args, signs the query
+// with the client's secret, and decodes the JSON response into response.
+// HTTP 4xx/5xx responses are returned as *Error carrying the parsed error
+// body and status code.
+func (client *Client) Invoke(action string, args interface{}, response interface{}) error {
+ if err := client.ensureProperties(); err != nil {
+ return err
+ }
+
+ request := Request{}
+ request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
+
+ query := util.ConvertToQueryValues(request)
+ util.SetQueryValues(args, &query)
+
+ // Sign request
+ signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret)
+
+ // Generate the request URL
+ requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature)
+
+ httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil)
+
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ // TODO move to util and add build val flag
+ httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
+
+ httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent)
+
+ t0 := time.Now()
+ httpResp, err := client.httpClient.Do(httpReq)
+ t1 := time.Now()
+ if err != nil {
+ return GetClientError(err)
+ }
+ statusCode := httpResp.StatusCode
+
+ if client.debug {
+ log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0))
+ }
+
+ defer httpResp.Body.Close()
+ body, err := ioutil.ReadAll(httpResp.Body)
+
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ if client.debug {
+ // Best effort: json.Indent's error is ignored; an unparsable body
+ // logs an empty buffer.
+ var prettyJSON bytes.Buffer
+ err = json.Indent(&prettyJSON, body, "", " ")
+ log.Println(string(prettyJSON.Bytes()))
+ }
+
+ if statusCode >= 400 && statusCode <= 599 {
+ // Error bodies are decoded best-effort; an unparsable body still
+ // yields an *Error carrying the HTTP status code.
+ errorResponse := ErrorResponse{}
+ err = json.Unmarshal(body, &errorResponse)
+ ecsError := &Error{
+ ErrorResponse: errorResponse,
+ StatusCode: statusCode,
+ }
+ return ecsError
+ }
+
+ err = json.Unmarshal(body, response)
+ //log.Printf("%++v", response)
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ return nil
+}
+
+// InvokeByFlattenMethod behaves like Invoke but encodes args with
+// util.SetQueryValueByFlattenMethod, which expands FlattenArray fields into
+// index-suffixed parameters (Param.1, Param.2, ...). All other behavior
+// (signing, error decoding, debug logging) is identical to Invoke.
+func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error {
+ if err := client.ensureProperties(); err != nil {
+ return err
+ }
+
+ request := Request{}
+ request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
+
+ query := util.ConvertToQueryValues(request)
+
+ util.SetQueryValueByFlattenMethod(args, &query)
+
+ // Sign request
+ signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret)
+
+ // Generate the request URL
+ requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature)
+
+ httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil)
+
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ // TODO move to util and add build val flag
+ httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
+
+ httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent)
+
+ t0 := time.Now()
+ httpResp, err := client.httpClient.Do(httpReq)
+ t1 := time.Now()
+ if err != nil {
+ return GetClientError(err)
+ }
+ statusCode := httpResp.StatusCode
+
+ if client.debug {
+ log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0))
+ }
+
+ defer httpResp.Body.Close()
+ body, err := ioutil.ReadAll(httpResp.Body)
+
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ if client.debug {
+ // Best effort: json.Indent's error is ignored.
+ var prettyJSON bytes.Buffer
+ err = json.Indent(&prettyJSON, body, "", " ")
+ log.Println(string(prettyJSON.Bytes()))
+ }
+
+ if statusCode >= 400 && statusCode <= 599 {
+ // Error bodies are decoded best-effort; the HTTP status code is
+ // always preserved in the returned *Error.
+ errorResponse := ErrorResponse{}
+ err = json.Unmarshal(body, &errorResponse)
+ ecsError := &Error{
+ ErrorResponse: errorResponse,
+ StatusCode: statusCode,
+ }
+ return ecsError
+ }
+
+ err = json.Unmarshal(body, response)
+ //log.Printf("%++v", response)
+ if err != nil {
+ return GetClientError(err)
+ }
+
+ return nil
+}
+
+// InvokeByAnyMethod sends a signed HTTP request for ECS-style services using
+// an arbitrary HTTP method, extending Invoke with a caller-chosen method and
+// an extra path appended to the endpoint (parameter added 2017-01-30).
+// For GET the signed parameters travel in the query string; for all other
+// methods they are form-encoded in the request body. HTTP 4xx/5xx responses
+// are returned as *Error carrying the parsed error body and status code.
+func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error {
+	if err := client.ensureProperties(); err != nil {
+		return err
+	}
+
+	request := Request{}
+	request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
+	data := util.ConvertToQueryValues(request)
+	util.SetQueryValues(args, &data)
+
+	// Sign request
+	signature := util.CreateSignatureForRequest(method, &data, client.AccessKeySecret)
+
+	data.Add("Signature", signature)
+
+	// Build the request; parameters go in the URL for GET, in the body otherwise.
+	var (
+		httpReq *http.Request
+		err     error
+	)
+	if method == http.MethodGet {
+		requestURL := client.endpoint + path + "?" + data.Encode()
+		httpReq, err = http.NewRequest(method, requestURL, nil)
+		if err != nil {
+			return GetClientError(err)
+		}
+	} else {
+		httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode()))
+		if err != nil {
+			// Check the error before touching httpReq: on failure
+			// http.NewRequest returns a nil request, and setting a header
+			// on it would panic.
+			return GetClientError(err)
+		}
+		httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	}
+
+	// TODO move to util and add build val flag
+	httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
+	httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+" "+client.userAgent)
+
+	t0 := time.Now()
+	httpResp, err := client.httpClient.Do(httpReq)
+	t1 := time.Now()
+	if err != nil {
+		return GetClientError(err)
+	}
+	statusCode := httpResp.StatusCode
+
+	if client.debug {
+		log.Printf("Invoke %s %s %d (%v) %v", ECSRequestMethod, client.endpoint, statusCode, t1.Sub(t0), data.Encode())
+	}
+
+	defer httpResp.Body.Close()
+	body, err := ioutil.ReadAll(httpResp.Body)
+	if err != nil {
+		return GetClientError(err)
+	}
+
+	if client.debug {
+		// Best effort: json.Indent's error is ignored; an unparsable body
+		// logs an empty buffer.
+		var prettyJSON bytes.Buffer
+		err = json.Indent(&prettyJSON, body, "", " ")
+		log.Println(prettyJSON.String())
+	}
+
+	if statusCode >= 400 && statusCode <= 599 {
+		// Error bodies are decoded best-effort; the HTTP status code is
+		// always preserved in the returned *Error.
+		errorResponse := ErrorResponse{}
+		err = json.Unmarshal(body, &errorResponse)
+		ecsError := &Error{
+			ErrorResponse: errorResponse,
+			StatusCode:    statusCode,
+		}
+		return ecsError
+	}
+
+	err = json.Unmarshal(body, response)
+	if err != nil {
+		return GetClientError(err)
+	}
+
+	return nil
+}
+
+// GenerateClientToken returns a random string suitable for use as a client
+// token (delegates to util.CreateRandomString).
+func (client *Client) GenerateClientToken() string {
+ return util.CreateRandomString()
+}
+
+// GetClientErrorFromString wraps a plain message in an *Error with the
+// synthetic code "AliyunGoClientFailure" and status code -1, marking a
+// client-side (non-HTTP) failure.
+func GetClientErrorFromString(str string) error {
+	resp := ErrorResponse{
+		Code:    "AliyunGoClientFailure",
+		Message: str,
+	}
+	return &Error{
+		ErrorResponse: resp,
+		StatusCode:    -1,
+	}
+}
+
+// GetClientError converts any error into the package's *Error form by
+// wrapping its message via GetClientErrorFromString.
+func GetClientError(err error) error {
+ return GetClientErrorFromString(err.Error())
+}
diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoint.go b/vendor/github.com/denverdino/aliyungo/common/endpoint.go
new file mode 100644
index 000000000000..16bcbf9d620b
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/common/endpoint.go
@@ -0,0 +1,118 @@
+package common
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const (
+ // locationDefaultEndpoint is the default API endpoint of the Location service.
+ locationDefaultEndpoint = "https://location.aliyuncs.com"
+ // locationAPIVersion is the Location API version used by this client.
+ locationAPIVersion = "2015-06-12"
+ HTTP_PROTOCOL = "http"
+ HTTPS_PROTOCOL = "https"
+)
+
+var (
+ // endpoints caches discovered endpoints per region and service code.
+ // NOTE(review): read and written without synchronization; confirm callers
+ // are single-goroutine before concurrent use.
+ endpoints = make(map[Region]map[string]string)
+)
+
+// init is intentionally empty: endpoints are populated lazily via
+// setProductRegionEndpoint / loadEndpointFromFile rather than at start-up.
+func init() {
+
+}
+
+// NewLocationClient builds a Client configured for the Location service,
+// honoring the LOCATION_ENDPOINT environment variable as an endpoint
+// override and defaulting to https://location.aliyuncs.com.
+func NewLocationClient(accessKeyId, accessKeySecret string) *Client {
+ endpoint := os.Getenv("LOCATION_ENDPOINT")
+ if endpoint == "" {
+ endpoint = locationDefaultEndpoint
+ }
+
+ client := &Client{}
+ client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret)
+ return client
+}
+
+// DescribeEndpoint calls the Location service's DescribeEndpoint action and
+// returns the decoded response, or a nil response with the invocation error.
+func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) {
+	response := &DescribeEndpointResponse{}
+	if err := client.Invoke("DescribeEndpoint", args, response); err != nil {
+		return nil, err
+	}
+	return response, nil
+}
+
+// getProductRegionEndpoint returns the cached endpoint for the given region
+// and service code, or "" when none has been cached yet.
+func getProductRegionEndpoint(region Region, serviceCode string) string {
+	serviceEndpoints, found := endpoints[region]
+	if !found {
+		return ""
+	}
+	// A missing serviceCode key yields the zero value "", matching the
+	// not-cached result.
+	return serviceEndpoints[serviceCode]
+}
+
+// setProductRegionEndpoint caches the resolved endpoint for the given region
+// and service code. It inserts into the region's existing inner map instead
+// of replacing it, so endpoints already cached for other service codes in
+// the same region are preserved (the previous map assignment discarded them).
+func setProductRegionEndpoint(region Region, serviceCode string, endpoint string) {
+	serviceEndpoints, found := endpoints[region]
+	if !found {
+		serviceEndpoints = make(map[string]string)
+		endpoints[region] = serviceEndpoints
+	}
+	serviceEndpoints[serviceCode] = endpoint
+}
+
+// DescribeOpenAPIEndpoint resolves the "openAPI"-type endpoint for the given
+// region and service code, returning a cached value when available and
+// caching successful lookups. HTTPS is preferred when the service advertises
+// it; lookup failures are swallowed and reported as "".
+func (client *Client) DescribeOpenAPIEndpoint(region Region, serviceCode string) string {
+ if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" {
+ return endpoint
+ }
+
+ // Fall back to HTTP unless the service's protocol list includes HTTPS.
+ defaultProtocols := HTTP_PROTOCOL
+
+ args := &DescribeEndpointArgs{
+ Id: region,
+ ServiceCode: serviceCode,
+ Type: "openAPI",
+ }
+
+ endpoint, err := client.DescribeEndpoint(args)
+ if err != nil || endpoint.Endpoint == "" {
+ return ""
+ }
+
+ for _, protocol := range endpoint.Protocols.Protocols {
+ if strings.ToLower(protocol) == HTTPS_PROTOCOL {
+ defaultProtocols = HTTPS_PROTOCOL
+ break
+ }
+ }
+
+ ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoint)
+
+ setProductRegionEndpoint(region, serviceCode, ep)
+ return ep
+}
+
+// loadEndpointFromFile reads ./endpoints.xml and returns the HTTPS endpoint
+// registered for the given region and service code, or "" on any read/parse
+// error or when no match is found.
+// NOTE(review): the path is relative to the process working directory, so
+// this fallback silently fails when the program runs from elsewhere —
+// confirm whether that is acceptable for callers.
+func loadEndpointFromFile(region Region, serviceCode string) string {
+ data, err := ioutil.ReadFile("./endpoints.xml")
+ if err != nil {
+ return ""
+ }
+
+ var endpoints Endpoints
+ err = xml.Unmarshal(data, &endpoints)
+ if err != nil {
+ return ""
+ }
+
+ // Product names in the file are matched case-insensitively against the
+ // lowercase serviceCode.
+ for _, endpoint := range endpoints.Endpoint {
+ if endpoint.RegionIds.RegionId == string(region) {
+ for _, product := range endpoint.Products.Product {
+ if strings.ToLower(product.ProductName) == serviceCode {
+ return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName)
+ }
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml
new file mode 100644
index 000000000000..4079bcd2b508
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml
@@ -0,0 +1,1349 @@
+
+